| prompt | completion | api |
|---|---|---|
| string · lengths 19–1.03M | string · lengths 4–2.12k | string · lengths 8–90 |
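A minimal sketch (hypothetical file name and format; pandas assumed available) of how a dump with these three string columns could be loaded and inspected:

import pandas as pd
samples = pd.read_parquet("code_completion_corpus.parquet")  # hypothetical path
print(samples[["prompt", "completion", "api"]].head())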
# encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
import joblib
import time
start_time=time.time()
print("Starting job at time:", time.time())
debug = True
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
train_df = shuffle(train_df, random_state=1234);
train_df = train_df.iloc[:100000]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add image quality: by steeve
# =============================================================================
import pickle
with open('../input/inception_v3_include_head_max_train.p', 'rb') as f:
x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p', 'rb') as f:
x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns=['image_quality'])
incep_test_image_df = pd.DataFrame(test_features, columns=['image_quality'])
""" Core functions of the aecg package: tools for annotated ECG HL7 XML files
This submodule implements helper functions to validate and read annotated
electrocardiogram (ECG) data stored in XML files following the HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict
from lxml import etree
from scipy.interpolate import interp1d
import datetime
import logging
import numpy as np
import os
import pandas as pd
# Python logging ==============================================================
logger = logging.getLogger(__name__)
# CONSTANTS ===================================================================
#: Defines column names for the validationResults DataFrame
VALICOLS = ["EGXFN", "VALIGRP", "PARAM",
"VALUE", "XPATH", "VALIMSG", "VALIOUT"]
#: Codes used in sequences
TIME_CODES = ["TIME_ABSOLUTE", "TIME_RELATIVE"]
#: Lead codes defined in the aECG HL7 standard and accepted by the aecg package
STD_LEADS = ["MDC_ECG_LEAD_I", "MDC_ECG_LEAD_II", "MDC_ECG_LEAD_III",
"MDC_ECG_LEAD_AVR", "MDC_ECG_LEAD_AVL", "MDC_ECG_LEAD_AVF",
"MDC_ECG_LEAD_V1", "MDC_ECG_LEAD_V2", "MDC_ECG_LEAD_V3",
"MDC_ECG_LEAD_V4", "MDC_ECG_LEAD_V5", "MDC_ECG_LEAD_V6",
"MDC_ECG_LEAD_X", "MDC_ECG_LEAD_Y", "MDC_ECG_LEAD_Z",
"MDC_ECG_LEAD_AVRneg", "MDC_ECG_LEAD_AVRNEG",
"MDC_ECG_LEAD_aVR", "MDC_ECG_LEAD_aVL", "MDC_ECG_LEAD_aVF", ]
#: Lead codes not in the aECG HL7 standard but accepted by the aecg package
KNOWN_NON_STD_LEADS = ["MORTARA_ECG_LEAD_TEA", "FDA_ECG_LEAD_VCGMAG"]
#: Codes accepted by the aecg package
SEQUENCE_CODES = TIME_CODES + STD_LEADS + KNOWN_NON_STD_LEADS
#: Display names for the lead codes defined in `aecg.core`
STD_LEADS_DISPLAYNAMES = {"MDC_ECG_LEAD_I": "I",
"MDC_ECG_LEAD_II": "II",
"MDC_ECG_LEAD_III": "III",
"MDC_ECG_LEAD_AVR": "aVR",
"MDC_ECG_LEAD_AVL": "aVL",
"MDC_ECG_LEAD_AVF": "aVF",
"MDC_ECG_LEAD_AVRneg": "-aVR",
"MDC_ECG_LEAD_AVRNEG": "-aVR",
"MDC_ECG_LEAD_V1": "V1",
"MDC_ECG_LEAD_V2": "V2",
"MDC_ECG_LEAD_V3": "V3",
"MDC_ECG_LEAD_V4": "V4",
"MDC_ECG_LEAD_V5": "V5",
"MDC_ECG_LEAD_V6": "V6",
"MORTARA_ECG_LEAD_TEA": "Mortara TEA",
"FDA_ECG_LEAD_VCGMAG": "VCGMAG",
"MDC_ECG_LEAD_aVR": "aVR",
"MDC_ECG_LEAD_aVL": "aVL",
"MDC_ECG_LEAD_aVF": "aVF", }
# XML and XPATH functions =====================================================
def new_validation_row(egxfile: str, valgroup: str, param: str) -> Dict:
"""Returns a new empty validation row
Args:
egxfile (str): filename of the xml file containing the aECG
valgroup (str): validation group
param (str): String with the parameter being assessed by the validator
Returns:
Dict: New empty validation row.
"""
validation_row = {
"EGXFN": egxfile,
"XPATH": "",
"VALIGRP": valgroup,
"PARAM": param,
"VALUE": "",
"VALIOUT": "",
"VALIMSG": ""
}
return validation_row
def validate_xpath(xmlnode: etree._ElementTree, xpath: str, ns: str, attr: str,
valrow: Dict, failcat: str = "ERROR") -> Dict:
""" Populates valrow with validation results
Populates valrow with validation results of the attribute in the node
specified by xpath expression
Args:
xmlnode (etree._ElementTree): root or parent xmlnode
xpath (str): xpath expression to search for
ns (str): namespace for xpath
attr (str): String with the attribute for which to retrieve the value. If
empty, the text value of the first node (if found) is used instead.
valrow (Dict): initialized validation row where populate validation
result.
failcat (str): string with validation output category when validation
fails (i.e., ERROR or WARNING)
Returns:
Dict: Validation row populated with the validation results.
"""
valrow["XPATH"] = xpath
if ns != "":
valnodes = xmlnode.xpath(xpath.replace("/", "/ns:"),
namespaces={"ns": ns})
else:
valnodes = xmlnode.xpath(xpath)
valrow["VALIOUT"] = "ERROR"
valrow[
"VALIMSG"] = "Validation unknown error parsing xpath expression in XML"
if len(valnodes) == 1:
valnode = valnodes[0]
if attr == "":
txt = valnode.text
if txt is None:
txt = ""
valrow["VALIOUT"] = failcat
valrow[
"VALIMSG"] = "Node found but value is missing or empty" \
" string"
else:
valrow["VALIOUT"] = "PASSED"
valrow["VALIMSG"] = ""
valrow["VALUE"] = txt
else:
txt = valnode.get(attr)
if txt is None:
txt = ""
valrow["VALIOUT"] = failcat
valrow["VALIMSG"] = "Node found but attribute is missing"
else:
valrow["VALIOUT"] = "PASSED"
valrow["VALIMSG"] = ""
valrow["VALUE"] = txt
else:
if len(valnodes) > 1:
valrow["VALIOUT"] = failcat
valrow["VALIMSG"] = "Multiple nodes in XML"
else:
valrow["VALIOUT"] = failcat
valrow["VALIMSG"] = "Node not found"
return valrow
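# Illustrative usage sketch (not part of the original module): exercise
# new_validation_row and validate_xpath against a minimal in-memory document.
# The namespace below is the HL7 v3 namespace used by aECG files; the id value
# and file name are made up for the example.
def _example_validate_xpath() -> Dict:
    doc = etree.ElementTree(etree.fromstring(
        b'<AnnotatedECG xmlns="urn:hl7-org:v3"><id root="1.2.3"/></AnnotatedECG>'))
    row = new_validation_row("example.xml", "GENERAL", "UUID")
    row = validate_xpath(doc, "./id", "urn:hl7-org:v3", "root", row)
    return row  # expected: row["VALIOUT"] == "PASSED" and row["VALUE"] == "1.2.3"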
# Other helper functions =====================================================
def get_aecg_schema_location() -> str:
""" Returns the full path to the HL7 aECG xsd schema files included in aecg
"""
xsd_filename = os.path.normpath(
os.path.join(
os.path.dirname(__file__),
"data/hl7/2003-12 Schema/schema/PORT_MT020001.xsd"))
return xsd_filename
# aECG classes ================================================================
class AecgLead:
"""
Sampled voltage values and related information recorded from an ECG lead.
Args:
Attributes:
leadname: Lead name as originally included in the aECG xml file.
origin: Origin of the value scale, i.e., the physical quantity
that a zero-digit would represent in the sequence of digit values.
origin_unit: Units of the origin value.
scale: A ratio-scale quantity that is factored out of the sequence of
digit values.
scale_unit: Units of the scale value.
digits: List of sampled values.
LEADTIME: (optional) Time when the lead was recorded
"""
def __init__(self):
self.leadname = ""
self.origin = 0
self.origin_unit = "uV"
self.scale = 1
self.scale_unit = "uV"
self.digits = []
self.LEADTIME = {"code": "", "head": "", "increment": "", "unit": ""}
def display_name(self):
if self.leadname in STD_LEADS:
return STD_LEADS_DISPLAYNAMES[self.leadname.upper()]
return self.leadname
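# Illustrative sketch (assumption about the HL7 SLIST_PQ convention, not part of the
# original module): reconstruct physical sample values from an AecgLead as
# value = origin + digit * scale, then convert microvolts to millivolts.
def _example_lead_to_millivolts(lead: "AecgLead") -> np.ndarray:
    values = lead.origin + np.asarray(lead.digits, dtype=float) * lead.scale
    if lead.origin_unit == "uV" and lead.scale_unit == "uV":
        values = values / 1000.0  # uV -> mV
    return values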
class AecgAnnotationSet:
"""
Annotation set for a given ECG waveform.
Args:
Attributes:
person: Name of the person who performed the annotations.
device: Model and name of the device used to perform the annotations.
anns: Annotations
"""
def __init__(self):
self.person = ""
self.device = {"model": "", "name": ""}
self.anns = []
class Aecg:
"""
An annotated ECG (aECG)
Attributes:
filename (str): filename including path to the XML file where the Aecg
is stored. This could be in the filesystem or within the zip file
specified in the zipContainer attribute.
zipContainer (str): filename of the zip file where the XML specified by
the filename attribute is stored. If empty string ("") then the
filename is stored in the filesystem and not in a zip file.
isValid (str): Indicates whether the original XML file passed XML
schema validation ("Y"), failed ("N") or has not been validated
("").
xmlfound (bool): Indicates whether the XML file was found, loaded and
parsed into an xml document
xmldoc (etree._ElementTree): The XML document containing the annotated
ECG information.
UUID (str): Annotated ECG universal unique identifier
EGDTC (Dict): Date and time of collection of the annotated ECG.
DEVICE (Dict): Dictionary containing device information (i.e.,
manufacturer, model, software)
USUBJID (Dict): Unique subject identifier.
SEX (str): Sex of the subject.
BIRTHTIME (str): Birthtime in HL7 date and time format.
RACE (str): Race of the subject.
TRTA (str): Assigned treatment.
STUDYID (Dict): Study identifier.
STUDYTITLE (str): Title of the study.
TPT (Dict): Absolute timepoint or study event information.
RTPT (Dict): Relative timepoint or study event relative to a reference
event.
PTPT (Dict): Protocol timepoint information.
RHYTHMID (Dict): Unique identifier of the rhythm waveform.
RHYTHMCODE (Dict): Code of the rhythm waveform (it should be "RHYTHM").
RHYTHMEGDTC (Dict): Date and time of collection of the rhythm waveform.
RHYTHMTIME (Dict): Time and sampling frequency information of the
rhythm waveform.
RHYTHMLEADS (List[AecgLead]): ECG leads of the rhythm waveform.
RHYTHMANNS (List[AecgAnnotationSet]): Annotation sets for the RHYTHM
waveform.
DERIVEDID (Dict): Unique identifier of the derived ECG waveform
DERIVEDCODE (Dict): Code of the derived waveform (supported code is
"REPRESENTATIVE_BEAT").
DERIVEDEGDTC (Dict): Date and time of collection of the derived
waveform.
DERIVEDTIME (Dict): Time and sampling frequency information of the
derived waveform.
DERIVEDLEADS (List[AecgLead]): ECG leads of the derived waveform.
DERIVEDANNS (List[AecgAnnotationSet]): Annotation sets for the derived
waveform.
validatorResults (pd.DataFrame): validation log generated when
reading the file.
"""
def __init__(self):
# Datasource
self.filename = ""
self.zipContainer = ""
self.isValid = ""
self.xmlfound = False
self.xmldoc = None
# General ECG information
self.UUID = ""
self.EGDTC = {"low": "", "center": "", "high": ""}
self.DEVICE = {"manufacturer": "", "model": "", "software": ""}
# Subject information
self.USUBJID = {"extension": "", "root": ""}
self.SEX = ""
self.BIRTHTIME = ""
self.RACE = ""
# Treatment information
self.TRTA = ""
# Clinical trial information
self.STUDYID = {"extension": "", "root": ""}
self.STUDYTITLE = ""
# Absolute timepoint information
self.TPT = {"code": "", "low": "", "high": "", "displayName": "",
"reasonCode": ""}
# Relative timepoint information
self.RTPT = {"code": "", "displayName": "", "pauseQuantity": "",
"pauseQuantity_unit": ""}
# Protocol timepoint information
self.PTPT = {"code": "", "displayName": "", "referenceEvent": "",
"referenceEvent_displayName": ""}
# Rhythm waveforms and annotations
self.RHYTHMID = {"extension": "", "root": ""}
self.RHYTHMCODE = {"code": "", "displayName": ""}
self.RHYTHMEGDTC = {"low": "", "high": ""}
self.RHYTHMTIME = {"code": "", "head": "", "increment": "", "unit": ""}
self.RHYTHMLEADS = []
self.RHYTHMANNS = []
# Derived waveforms and annotations
self.DERIVEDID = {"extension": "", "root": ""}
self.DERIVEDCODE = {"code": "", "displayName": ""}
self.DERIVEDEGDTC = {"low": "", "high": ""}
self.DERIVEDTIME = {"code": "", "head": "",
"increment": "", "unit": ""}
self.DERIVEDLEADS = []
self.DERIVEDANNS = []
# Validator results when reading and parsing the aECG XML
self.validatorResults = pd.DataFrame()
def xmlstring(self):
"""Returns the :attr:`xmldoc` as a string
Returns:
str: Pretty string of :attr:`xmldoc`
"""
if self.xmldoc is not None:
return etree.tostring(self.xmldoc, pretty_print=True).\
decode("utf-8")
else:
return "N/A"
def rhythm_as_df(self, new_fs: float = None) -> pd.DataFrame:
"""Returns the rhythm waveform as a dataframe
Transform the rhythm waveform into a matrix with time in ms and
digits values as physical values in mV. If `new_fs` is provided,
the transformation also resamples the waveform to the sampling
frequency specified in `new_fs` in Hz.
Args:
new_fs (float, optional): New sampling frequency in Hz. Defaults to
None.
Returns:
pd.DataFrame: rhythm waveform in a matrix with time in ms and
digits values as physical values in mV.
"""
ecg_data = pd.DataFrame()
if len(self.RHYTHMLEADS) > 0:
ecg_start_time = parse_hl7_datetime(self.RHYTHMEGDTC["low"])
tmp = [lead_mv_per_ms(ecg_start_time, ecg_lead, new_fs)
for ecg_lead in self.RHYTHMLEADS]
# Few aECGs have duplicate leads, so we drop them before returning
# the final dataframe
tmp_df = pd.concat(tmp).drop_duplicates()
ecg_data = tmp_df.pivot(index="TIME", columns="LEADNAM",
values="VALUE").reset_index()
return ecg_data
def derived_as_df(self, new_fs: float = None) -> pd.DataFrame:
"""Returns the derived waveform as a dataframe
Transform the derived waveform into a matrix with time in ms and
digits values as physical values in mV. If `new_fs` is provided,
the transformation also resamples the waveform to the sampling
frequency specified in `new_fs` in Hz.
Args:
new_fs (float, optional): New sampling frequency in Hz. Defaults to
None.
Returns:
pd.DataFrame: derived waveform in a matrix with time in ms and
digits values as physical values in mV.
"""
ecg_data = pd.DataFrame()
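# Illustrative sketch (standalone helper, not part of the original module) of the kind
# of resampling step that rhythm_as_df/derived_as_df delegate to their helpers:
# interpolate one lead sampled at fs_in Hz onto a new_fs Hz grid with scipy's interp1d.
def _example_resample_lead(values_mv: np.ndarray, fs_in: float, new_fs: float) -> pd.DataFrame:
    t_in = np.arange(len(values_mv)) / fs_in * 1000.0  # original time grid in ms
    t_out = np.arange(0.0, t_in[-1], 1000.0 / new_fs)  # resampled time grid in ms
    interpolator = interp1d(t_in, values_mv, kind="linear")
    return pd.DataFrame({"TIME": t_out, "VALUE": interpolator(t_out)})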
"""
Script to use the static chunks found by `StaticFinder` to calibrate accelerometer raw data so that gravity drift is minimized.
Prerequisite:
Run `StaticFinder` before using this script
Usage:
pad -p <PID> -r <root> process -p <PATTERN> --par AccelerometerCalibrator <options>
options:
--static_chunks <path>: the filepath (relative to root folder or absolute path) that contains the static chunks found by `StaticFinder`. User must provide this information in order to use the script.
--output_folder <folder name>: the folder name that the script will save calibrated data to in a participant's Derived folder. User must provide this information in order to use the script.
output:
The command will not print any output to console. The command will save the calibrated hourly files to the <output_folder>
Examples:
1. Calibrate the Actigraph raw data files for participant SPADES_1 in parallel and save it to a folder named 'Calibrated' in the 'Derived' folder of SPADES_1
pad -p SPADES_1 process AccelerometerCalibrator --par -p MasterSynced/**/Actigraph*.sensor.csv --output_folder Calibrated --static_chunks SPADES_1/Derived/static_chunks.csv
2. Calibrate the Actigraph raw data files for all participants in a dataset in parallel and save it to a folder named 'Calibrated' in the 'Derived' folder of each participant
pad process AccelerometerCalibrator --par -p MasterSynced/**/Actigraph*.sensor.csv --output_folder Calibrated --static_chunks DerivedCrossParticipants/static_chunks.csv
"""
import os
import pandas as pd
from .. import api as mhapi
from ..api import utils as mu
from .BaseProcessor import SensorProcessor
from ..utility import logger
def build(**kwargs):
return AccelerometerCalibrator(**kwargs).run_on_file
class AccelerometerCalibrator(SensorProcessor):
def __init__(self, verbose=True, independent=True, violate=False, static_chunks=None, output_folder=None):
SensorProcessor.__init__(self, verbose=verbose, independent=independent, violate=violate)
self.name = 'AccelerometerCalibrator'
self.static_chunks = static_chunks
self.output_folder = output_folder
def _run_on_data(self, combined_data, data_start_indicator, data_stop_indicator):
if self.static_chunks is None:
logger.warn('static chunks file is not provided, return the original data')
return combined_data
pid = self.meta['pid']
sid = self.meta['sid']
static_chunks = os.path.abspath(self.static_chunks)
static_chunks = pd.read_csv(self.static_chunks, parse_dates=[0], infer_datetime_format=True)
if self.violate:
selected_static_chunks = static_chunks.loc[static_chunks['pid'] == pid,:]
else:
selected_static_chunks = static_chunks.loc[(static_chunks['id'] == sid) & (static_chunks['pid'] == pid),:]
chunk_count = selected_static_chunks.groupby(['WINDOW_ID', 'COUNT', 'date', 'hour']).count().shape[0]
if self.verbose:
logger.info("Found " + str(chunk_count) + " static chunks")
if chunk_count < 9:
logger.warn("Need at least 9 static chunks for calibration, skip and use original data")
calibrated_df = combined_data
else:
calibrated_df = mhapi.Calibrator(combined_data, max_points=100, verbose=self.verbose).set_static(selected_static_chunks).run().calibrated
return calibrated_df
def _post_process(self, result_data):
if self.output_folder is None:
logger.warn('output_folder is not provided, no hourly calibrated file will be saved')
return pd.DataFrame()
output_file = mu.generate_output_filepath(self.file, setname=self.output_folder, newtype='sensor')
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
result_data.to_csv(output_file, index=False, float_format='%.9f')
if self.verbose:
logger.info('Saved calibrated data to ' + output_file)
return pd.DataFrame()
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import random
MAXLIFE = 120
SCALE = 1
RESCALE = 1
true_rul = []
test_engine_id = 0
training_engine_id = 0
def kink_RUL(cycle_list, max_cycle):
'''
Piecewise linear function with zero gradient and unit gradient
^
|
MAXLIFE |-----------
| \
| \
| \
| \
| \
|----------------------->
'''
knee_point = max_cycle - MAXLIFE
kink_RUL = []
stable_life = MAXLIFE
for i in range(0, len(cycle_list)):
if i < knee_point:
kink_RUL.append(MAXLIFE)
else:
tmp = kink_RUL[i - 1] - (stable_life / (max_cycle - knee_point))
kink_RUL.append(tmp)
return kink_RUL
def compute_rul_of_one_id(FD00X_of_one_id, max_cycle_rul=None):
'''
Take the rows belonging to one engine_id of train_FD001 and return the corresponding
RUL (remaining useful life) values for those rows as a list.
'''
cycle_list = FD00X_of_one_id['cycle'].tolist()
if max_cycle_rul is None:
max_cycle = max(cycle_list) # Failure cycle
else:
max_cycle = max(cycle_list) + max_cycle_rul
# print(max(cycle_list), max_cycle_rul)
# return kink_RUL(cycle_list,max_cycle)
return kink_RUL(cycle_list, max_cycle)
def compute_rul_of_one_file(FD00X, id='engine_id', RUL_FD00X=None):
'''
Input train_FD001, output a list
'''
rul = []
# In the loop train, each id value of the 'engine_id' column
if RUL_FD00X is None:
for _id in set(FD00X[id]):
rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id]))
return rul
else:
rul = []
for _id in set(FD00X[id]):
# print("#### id ####", int(RUL_FD00X.iloc[_id - 1]))
true_rul.append(int(RUL_FD00X.iloc[_id - 1]))
rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id], int(RUL_FD00X.iloc[_id - 1])))
return rul
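# Illustrative sketch (not in the original script): build piecewise-linear RUL labels
# for a tiny synthetic engine table with the helpers above. Note that kink_RUL assumes
# max_cycle > MAXLIFE, and rows must be grouped by engine_id so the returned list
# aligns with the dataframe.
def _example_compute_rul():
    n_cycles = 130
    demo = pd.DataFrame({
        'engine_id': [1] * n_cycles + [2] * n_cycles,
        'cycle': list(range(1, n_cycles + 1)) * 2,
    })
    demo['RUL'] = compute_rul_of_one_file(demo)
    return demo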
def get_CMAPSSData(save=False, save_training_data=True, save_testing_data=True, files=[1, 2, 3, 4, 5],
min_max_norm=False):
'''
:param save: switch to load the already preprocessed data or begin preprocessing of raw data
:param save_training_data: same functionality as 'save' but for training data only
:param save_testing_data: same functionality as 'save' but for testing data only
:param files: to indicate which sub dataset needed to be loaded for operations
:param min_max_norm: switch to enable min-max normalization
:return: function will save the preprocessed training and testing data as numpy objects
'''
if save == False:
return np.load("normalized_train_data.npy"), np.load("normalized_test_data.npy"), pd.read_csv(
'normalized_train_data.csv', index_col=[0]), pd.read_csv('normalized_test_data.csv', index_col=[0])
column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
's15', 's16', 's17', 's18', 's19', 's20', 's21']
if save_training_data: ### Training ###
train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
train_FD002 = pd.read_table("./CMAPSSData/train_FD002.txt", header=None, delim_whitespace=True)
train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
train_FD004 = pd.read_table("./CMAPSSData/train_FD004.txt", header=None, delim_whitespace=True)
train_FD001.columns = column_name
train_FD002.columns = column_name
train_FD003.columns = column_name
train_FD004.columns = column_name
previous_len = 0
frames = []
for data_file in ['train_FD00' + str(i) for i in files]: # load subdataset by subdataset
#### standard normalization ####
mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
std.replace(0, 1, inplace=True)
# print("std", std)
################################
if min_max_norm:
scaler = MinMaxScaler()
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
else:
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
list(eval(data_file)))] - mean) / std
eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file))
current_len = len(eval(data_file))
# print(eval(data_file).index)
eval(data_file).index = range(previous_len, previous_len + current_len)
previous_len = previous_len + current_len
# print(eval(data_file).index)
frames.append(eval(data_file))
print(data_file)
train = pd.concat(frames)
global training_engine_id
training_engine_id = train['engine_id']
train = train.drop('engine_id', 1)
train = train.drop('cycle', 1)
# if files[0] == 1 or files[0] == 3:
# train = train.drop('setting3', 1)
# train = train.drop('s18', 1)
# train = train.drop('s19', 1)
train_values = train.values * SCALE
np.save('normalized_train_data.npy', train_values)
train.to_csv('normalized_train_data.csv')
###########
else:
train = pd.read_csv('normalized_train_data.csv', index_col=[0])
train_values = train.values
if save_testing_data: ### testing ###
test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
test_FD002 = pd.read_table("./CMAPSSData/test_FD002.txt", header=None, delim_whitespace=True)
test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
test_FD004 = pd.read_table("./CMAPSSData/test_FD004.txt", header=None, delim_whitespace=True)
test_FD001.columns = column_name
test_FD002.columns = column_name
test_FD003.columns = column_name
test_FD004.columns = column_name
# load RUL data
RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
RUL_FD002 = pd.read_table("./CMAPSSData/RUL_FD002.txt", header=None, delim_whitespace=True)
RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
RUL_FD004 = pd.read_table("./CMAPSSData/RUL_FD004.txt", header=None, delim_whitespace=True)
RUL_FD001.columns = ['RUL']
RUL_FD002.columns = ['RUL']
RUL_FD003.columns = ['RUL']
RUL_FD004.columns = ['RUL']
previous_len = 0
frames = []
for (data_file, rul_file) in [('test_FD00' + str(i), 'RUL_FD00' + str(i)) for i in files]:
mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
std.replace(0, 1, inplace=True)
if min_max_norm:
scaler = MinMaxScaler()
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
else:
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
list(eval(data_file)))] - mean) / std
eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file), RUL_FD00X=eval(rul_file))
current_len = len(eval(data_file))
eval(data_file).index = range(previous_len, previous_len + current_len)
previous_len = previous_len + current_len
frames.append(eval(data_file))
print(data_file)
if len(files) == 1:
global test_engine_id
test_engine_id = eval(data_file)['engine_id']
test = pd.concat(frames)
#!/usr/bin/env python
# coding: utf-8
#<NAME>/ <EMAIL>
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy import stats
df = pd.read_csv("chocolateWeightMMT3field.txt", parse_dates = ['Reading'],
na_values=['-999'], delim_whitespace=True)
df.columns = ['time','weight','grams']
df.drop(df.index[0], inplace=True)
df = df.reset_index(drop=True)
df.time = pd.to_timedelta(df.time)
l_array=len(df)
time_array=np.arange(l_array)
n=0
for i in time_array:
time_array[n]=datetime.timedelta.total_seconds(df.time[n]-df.time[0])
n=n+1
df = df.drop("time", axis=1)
time_array=pd.DataFrame(data=time_array)
df_array = pd.concat([time_array, df], axis=1)
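# Illustrative sketch (an assumption: the scipy.stats import above suggests a linear
# fit of weight over elapsed time follows). Column layout matches the frame built
# above: first column = elapsed seconds, 'weight' = measured weight.
def _example_linear_trend(df_array):
    seconds = df_array.iloc[:, 0].astype(float)
    weight = pd.to_numeric(df_array['weight'], errors='coerce')
    fit = stats.linregress(seconds, weight)
    return fit.slope, fit.intercept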
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import numpy as np
from pandas.core.api import (Index, Series, TimeSeries, DataFrame, isnull)
import pandas.core.datetools as datetools
from pandas.util.testing import assert_series_equal
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# Series test cases
class TestSeries(unittest.TestCase):
def setUp(self):
self.ts = common.makeTimeSeries()
self.series = common.makeStringSeries()
self.objSeries = common.makeObjectSeries()
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(common.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assertRaises(Exception, Series, [0, 1, 2], index=None)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_corner(self):
df = common.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(common.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, series._set_index, None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, series._set_index,
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series.fromValue(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
strings = Series.fromValue('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
d = datetime.now()
dates = Series.fromValue(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
def test_contains(self):
common.assert_contains_all(self.ts.index, self.ts)
def test_save_load(self):
self.series.save('tmp1')
self.ts.save('tmp3')
unp_series = Series.load('tmp1')
unp_ts = Series.load('tmp3')
os.remove('tmp1')
os.remove('tmp3')
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(Exception, self.ts.__getitem__, d)
def test_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(common.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(common.randn(1000), index=np.arange(1000)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
def test_toString(self):
from cStringIO import StringIO
self.ts.toString(buffer=StringIO())
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
self.assert_(self.ts.keys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_stats(self):
self.series[5:15] = np.NaN
s1 = np.array(self.series)
s1 = s1[-np.isnan(s1)]
self.assertEquals(np.min(s1), self.series.min())
self.assertEquals(np.max(s1), self.series.max())
self.assertEquals(np.sum(s1), self.series.sum())
self.assertEquals(np.mean(s1), self.series.mean())
self.assertEquals(np.std(s1, ddof=1), self.series.std())
self.assertEquals(np.var(s1, ddof=1), self.series.var())
try:
from scipy.stats import skew
common.assert_almost_equal(skew(s1, bias=False),
self.series.skew())
except ImportError:
pass
self.assert_(not np.isnan(np.sum(self.series)))
self.assert_(not np.isnan(np.mean(self.series)))
self.assert_(not np.isnan(np.std(self.series)))
self.assert_(not np.isnan(np.var(self.series)))
self.assert_(not np.isnan(np.min(self.series)))
self.assert_(not np.isnan(np.max(self.series)))
self.assert_(np.isnan(Series([1.], index=[1]).std()))
self.assert_(np.isnan(Series([1.], index=[1]).var()))
self.assert_(np.isnan(Series([1.], index=[1]).skew()))
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op):
cython_or_numpy = op(series, other)
python = series.combineFunc(other, op)
common.assert_almost_equal(cython_or_numpy, python)
def check(other):
_check_op(other, operator.add)
_check_op(other, operator.sub)
_check_op(other, operator.div)
_check_op(other, operator.mul)
_check_op(other, operator.pow)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.div(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x))
check(self.ts * 2)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
deltas = Series([timedelta(1)] * 5, index=np.arange(5))
sub_deltas = deltas[::2]
deltas5 = deltas * 5
deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
common.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
self.assertRaises(Exception, self.ts.__pow__, df)
def test_combineFirst(self):
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combineFirst(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combineFirst(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = common.makeStringIndex(20)
floats = Series(common.randn(20))
#-------------------------------------------------------------------------------
# Name: GIS Viewer Attribution Evaluation
# Version: V_2.0
# Purpose: Produce report for installation geodatabase detailing data attribution
#
# Author: <NAME>
#
# Created: 2018/01/26
# Last Update: 2018/03/22
# Description: Evaluate installation geodatabases for minimum attribution required
# by AFCEC GIS viewer for best display of data.
#-------------------------------------------------------------------------------
# Import modules
import arcpy, os, numpy, pandas
from pandas import DataFrame
from datetime import datetime
# Start time
timenow = datetime.now()
print(timenow)
# WHICH FEATURE DATASETS ARE MISSING FROM THE INSTALLATION DATABASE COMPARED TO COMPARISON DATABASE
missFDSTable = arcpy.GetParameterAsText(0)
# WITHIN THE FEATURE DATASETS THAT THE INSTALLATION HAS,
# WHICH FEATURE CLASSES ARE MISSING?
missFCTable = arcpy.GetParameterAsText(1)
# WITHIN EACH REQUIRED FEATURE DATASET AND FEATURE CLASS THAT THE INSTALLATION HAS,
# WHICH FIELDS ARE MISSING?
missFLDTable = arcpy.GetParameterAsText(2)
# WITHIN EACH REQUIRED FEATURE DATASET AND FEATURE CLASS THAT THE INSTALLATION HAS,
# WHICH FIELDS ARE MISSING?
nullTable = arcpy.GetParameterAsText(3)
outputFile = arcpy.GetParameterAsText(4)
# =============================================================================
# missFDSTable = os.path.join(installGDB,"CIP_MissingFDS")
# missFCTable = os.path.join(installGDB,"CIP_MissingFCs")
# missFLDTable = os.path.join(installGDB,"CIP_MissingFields")
# nullTable = os.path.join(installGDB,"CIP_MissingData")
# =============================================================================
# to get dataframe of feature datasets, feature classes, and fields of geodatabase
def getFeaturesdf(GDB):
'''
# to get unique FDS, FC, and FIELDS across a geodatabase
Parameters
----------
GDB = path to GDB
Returns
-------
pandas dataframe with three columns (FDS, FC, FLD): feature dataset, feature class, and field name for each field in the gdb.
'''
d = pandas.DataFrame([])
arcpy.env.workspace = GDB
for theFDS in arcpy.ListDatasets():
for theFC in arcpy.ListFeatureClasses(feature_dataset=theFDS):
minFields = (fld.name.upper() for fld in arcpy.ListFields(os.path.join(GDB,theFDS,theFC)) if str(fld.name) not in ['Shape', 'OBJECTID', 'Shape_Length', 'Shape_Area'])
minFields = list(minFields)
for FLD in minFields:
d = d.append(pandas.DataFrame({'FDS': str(theFDS), 'FC': str(theFC), 'FLD': str(FLD)}, index=[0]), ignore_index=True)
return(d)
# to get field name of a ArcGIS table
def get_field_names(table):
"""
Get a list of field names not inclusive of the geometry and object id fields.
Parameters
----------
table: Table readable by ArcGIS
Returns
-------
List of field names.
"""
# list to store values
field_list = []
# iterate the fields
for field in arcpy.ListFields(table):
# if the field is not geometry nor object id, add it as is
if field.type != 'Geometry' and field.type != 'OID':
field_list.append(field.name)
# if geomtery is present, add both shape x and y for the centroid
elif field.type == 'Geometry':
field_list.append('SHAPE@XY')
# return the field list
return field_list
# to convert arcgis table to pandas dataframe
def table_to_pandas_dataframe(table, field_names=None):
"""
Load data into a Pandas Data Frame from esri geodatabase table for subsequent analysis.
Parameters
----------
table = Table readable by ArcGIS.
field_names: List of fields.
Returns
-------
Pandas DataFrame object.
"""
# if field names are not specified
if not field_names:
# get a list of field names
field_names = get_field_names(table)
# create a pandas data frame
dataframe = DataFrame(columns=field_names)
# use a search cursor to iterate rows
with arcpy.da.SearchCursor(table, field_names) as search_cursor:
# iterate the rows
for row in search_cursor:
# combine the field names and row items together, and append them
dataframe = dataframe.append(
dict(zip(field_names, row)),
ignore_index=True
)
# return the pandas data frame
return dataframe
def get_geodatabase_path(input_table):
'''Return the Geodatabase path from the input table or feature class.
:param input_table: path to the input table or feature class
'''
workspace = os.path.dirname(input_table)
if any(ext in os.path.splitext(workspace) for ext in ('.gdb', '.mdb', '.sde')):
return workspace
else:
return os.path.dirname(workspace)
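# Illustrative usage sketch (hypothetical path, not part of the original toolbox):
# get_geodatabase_path walks up from a feature class nested inside a feature dataset
# to the enclosing .gdb workspace.
def _example_geodatabase_path():
    return get_geodatabase_path(r"C:\GIS\Install.gdb\WaterFDS\WaterLine")  # -> C:\GIS\Install.gdb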
# to get a pandas dataframe into a arcgis table
def pandas_to_table(pddf,tablename):
'''
Parameters
----------
pddf = pandas dataframe
tablename = output table name to 'installGDB'
Returns
-------
a geodatabase table from pandas dataframe inside 'installGDB' geodatabase object (string to .gdb path)
'''
x = numpy.array(numpy.rec.fromrecords(pddf))
names = pddf.dtypes.index.tolist()
x.dtype.names = tuple(names)
gdbTbl = os.path.join(installGDB,tablename)
if arcpy.Exists(gdbTbl):
arcpy.Delete_management(gdbTbl)
arcpy.da.NumPyArrayToTable(x, gdbTbl)
def summariseMissingData(installGDB):
start_time = datetime.now()
arcpy.env.workspace = installGDB
installationName = os.path.splitext(os.path.basename(installGDB))[0]
tb = os.path.splitext(os.path.basename(nullTable))[0]
compName = tb.split("_")[0]
# output table names, with comparison geodatabase name prepended
# missingFDTblName=compName+"_MissingFDS"
# missingFCTblName=compName+"_MissingFCs"
# missingFLDTblName=compName+"_MissingFields"
# nullTableName=compName+"_MissingData"
#
## CONVERT TABLES TO PANDAS DATAFRAMES
pdNullTbl= table_to_pandas_dataframe(nullTable, field_names=None)
pdFLDTbl= table_to_pandas_dataframe(missFLDTable, field_names=None)
pdFCTbl= table_to_pandas_dataframe(missFCTable, field_names=None)
pdFDSTbl= table_to_pandas_dataframe(missFDSTable, field_names=None)
# replace cells with '' as NaN
pdNullTbl = pdNullTbl.replace('', numpy.nan)
pdFLDTbl = pdFLDTbl.replace('', numpy.NaN)
pdFCTbl = pdFCTbl.replace('', numpy.NaN)
pdFDSTbl = pdFDSTbl.replace('', numpy.NaN)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE INDETERMINANT
arcpy.AddMessage ("Getting count of indeterminant cells per feature class")
indtCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['TOTAL_INDT_COUNT'].agg('sum').fillna(0).reset_index()
indtCntByFC=pandas.DataFrame(indtCntByFC)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE DETERMINATE
arcpy.AddMessage ("Getting count of determinate cells per feature class")
detCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['TOTAL_DET_COUNT'].agg('sum').fillna(0).reset_index()
detCntByFC=pandas.DataFrame(detCntByFC)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE NULL
## THEN EXPORT THEM TO THE GEODATABASE
arcpy.AddMessage ("Getting count of 'null' cells per feature class")
nullCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['NULL_FC_COUNT'].agg('sum').fillna(0).reset_index()
nullCntByFC=pandas.DataFrame(nullCntByFC)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE TBD
## THEN EXPORT THEM TO THE GEODATABASE
arcpy.AddMessage ("Getting count of 'tbd' cells per feature class")
tbdCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['TBD_FC_COUNT'].agg('sum').fillna(0).reset_index()
tbdCntByFC=pandas.DataFrame(tbdCntByFC)
# FOR EACH FEATURE CLASS, GET COUNT OF CELLS THAT ARE OTHER
## THEN EXPORT THEM TO THE GEODATABASE
arcpy.AddMessage ("Getting count of 'other' cells per feature class")
otherCntByFC = pdNullTbl.groupby(['FDS','FC','INSTALLATION'])['OTHER_FC_COUNT'].agg('sum').fillna(0).reset_index()
otherCntByFC = pandas.DataFrame(otherCntByFC)
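# Illustrative sketch (standalone pandas, mirroring the groupby/sum pattern above):
# roll up per-feature-class null counts from a long-format QC table; values are made up.
def _example_null_rollup():
    qc = pandas.DataFrame({
        'FDS': ['Water', 'Water', 'Trans'],
        'FC': ['WaterLine', 'WaterLine', 'RoadCenterline'],
        'INSTALLATION': ['BaseA', 'BaseA', 'BaseA'],
        'NULL_FC_COUNT': [3, 2, 0],
    })
    return qc.groupby(['FDS', 'FC', 'INSTALLATION'])['NULL_FC_COUNT'].agg('sum').reset_index()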
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Plots model, forecast, and residual plots for the
# given df model.
# Should have columns:
# ts - time series data
# model - model fit on training data
# forecast - forecasted values
# Date - dates
def plot_forecast(df, title, summary=True):
## residuals
df["residuals"] = df["ts"] - df["model"]
df["error"] = df["ts"] - df["forecast"]
df["error_pct"] = df["error"] / df["ts"]
## kpi
residuals_mean = df["residuals"].mean()
residuals_std = df["residuals"].std()
error_mean = df["error"].mean()
error_std = df["error"].std()
mae = df["error"].apply(lambda x: np.abs(x)).mean()
mape = df["error_pct"].apply(lambda x: np.abs(x)).mean()
mse = df["error"].apply(lambda x: x ** 2).mean()
rmse = np.sqrt(mse)
# # intervals
# df["conf_int_low"] = df["forecast"] - 1.96 * residuals_std
# df["conf_int_up"] = df["forecast"] + 1.96 * residuals_std
# df["pred_int_low"] = df["forecast"] - 1.96 * error_std
# df["pred_int_up"] = df["forecast"] + 1.96 * error_std
# plot
fig = plt.figure(figsize=(10,6))
fig.suptitle(title, fontsize=20)
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2, sharey=ax1)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)
# Model fit
df[pd.notnull(df["model"])]
import warnings
import logging
warnings.filterwarnings('ignore', category=FutureWarning)
from .index import build as build_index
from .index import build_from_matrix, LookUpBySurface, LookUpBySurfaceAndContext
from .embeddings.base import load_embeddings, EmbedWithContext
from .ground_truth.data_processor import WikipediaDataset, InputExample, convert_examples_to_features
import click
import numpy as np
import pandas as pd
import dask.dataframe as dd
from tqdm import tqdm as tqdm
import pyarrow as pa
import pyarrow.parquet as pq
from pathlib import Path
from qurator.utils.parallel import run as prun
from numpy.linalg import norm
from numpy.matlib import repmat
import json
import sqlite3
from sklearn.utils import shuffle
from qurator.sbb_ner.models.tokenization import BertTokenizer
from multiprocessing import Semaphore
logger = logging.getLogger(__name__)
@click.command()
@click.argument('all-entities-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding-type', type=click.Choice(['fasttext', 'bert', 'flair']), required=True, nargs=1)
@click.argument('entity-type', type=str, required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('output-path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--n-processes', type=int, default=6, help='Number of parallel processes. default: 6.')
@click.option('--distance-measure', type=click.Choice(['angular', 'euclidean']), default='angular',
help="default: angular")
@click.option('--split-parts', type=bool, is_flag=True, help="Process entity surfaces in parts.")
@click.option('--model-path', type=click.Path(exists=True),
default=None, help="From where to load the embedding model.")
@click.option('--layers', type=str, default="-1,-2,-3,-4", help="Which layers to use. default -1,-2,-3,-4")
@click.option('--pooling', type=str, default="first", help="How to pool the output for different tokens/words. "
"default: first.")
@click.option('--scalar-mix', type=bool, is_flag=True, help="Use scalar mix of layers.")
@click.option('--max-iter', type=int, default=None, help='Perform only max-iter iterations (for testing purposes). '
'default: process everything.')
def build(all_entities_file, embedding_type, entity_type, n_trees, output_path,
n_processes, distance_measure, split_parts, model_path, layers, pooling, scalar_mix=False, max_iter=None):
"""
Create an approximative nearest neighbour index, based on the surface strings of entities, that enables a fast
lookup of NE-candidates.
ALL_ENTITIES_FILE: Pandas DataFrame pickle that contains all entities.
EMBEDDING_TYPE: Type of embedding [ fasttext, bert ]
ENTITY_TYPE: Type of entities, for instance ORG, LOC, PER ...
N_TREES: Number of trees in the approximative nearest neighbour index
OUTPUT_PATH: Where to write the result files.
"""
embeddings = load_embeddings(embedding_type, model_path=model_path, layers=layers, pooling_operation=pooling,
use_scalar_mix=scalar_mix)
build_index(all_entities_file, embeddings, entity_type, n_trees, n_processes, distance_measure, split_parts,
output_path, max_iter)
@click.command()
@click.argument('tagged-parquet', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding-type', type=click.Choice(['fasttext', 'bert']), required=True, nargs=1)
@click.argument('entities_file', type=str, required=True, nargs=1)
@click.argument('ent-type', type=str, required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('distance-measure', type=click.Choice(['angular', 'euclidean']), required=True, nargs=1)
@click.argument('output-path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--search-k', type=int, default=50, help="Number of NN to be considered. default: 50.")
@click.option('--max-dist', type=float, default=0.25, help="Maximum permitted NN distance. default: 0.25")
@click.option('--processes', type=int, default=6, help='Number of parallel processes. default: 6.')
@click.option('--save-interval', type=int, default=10000, help='Save result every N steps. default: 10000.')
@click.option('--split-parts', type=bool, is_flag=True, help="Process entity surfaces in parts.")
@click.option('--max-iter', type=float, default=np.inf, help="Number of evaluation iterations. "
"default: evaluate everything.")
@click.option('--model-path', type=click.Path(exists=True),
default=None, help="from where to load the embedding model.")
def evaluate(tagged_parquet, embedding_type, entities_file, ent_type, n_trees,
distance_measure, output_path, search_k, max_dist, processes, save_interval,
split_parts, max_iter, model_path):
"""
Evaluate the NE-lookup performance of some approximative nearest neighbour index.
Runs through many Wikipedia texts where the occurrences of named entities have been marked.
Determines how often the ANN-index manages to provide the correct candidate among the nearest neighbours.
TAGGED_PARQUET: A parquet file that contains the pre-processed Wikipedia text (see tag_entities2sqlite for details)
EMBEDDING_TYPE: 'fasttext' or 'bert'
ENTITIES_FILE: The entity table as pickled Pandas DataFrame.
ENT_TYPE: What type of entities should be considered, for instance: 'PER', 'LOC' or 'ORG'.
N_TREES: Number trees in the approximative nearest neighbour index.
DISTANCE_MEASURE: Distance measure of the approximative nearest neighbour index, i.e., 'angular' or 'euclidean'.
OUTPUT_PATH: Where to store the result.
"""
embeddings = load_embeddings(embedding_type, model_path=model_path)
print("Reading entity linking ground-truth file: {}".format(tagged_parquet))
df = dd.read_parquet(tagged_parquet)
print("done.")
data_sequence = tqdm(df.iterrows(), total=len(df))
result_path = '{}/nedstat-embt_{}-entt_{}-nt_{}-dm_{}-sk_{}-md_{}.parquet'.\
format(output_path, embedding_type, ent_type, n_trees, distance_measure, search_k, max_dist)
print("Write result statistics to: {} .".format(result_path))
total_successes = mean_rank = mean_len_rank = 0
results = []
def write_results():
nonlocal results
if len(results) == 0:
return
res = pd.concat(results)
# noinspection PyArgumentList
table = pa.Table.from_pandas(res)
pq.write_to_dataset(table, root_path=result_path)
results = []
for total_processed, (entity_title, ranking) in \
enumerate(LookUpBySurface.run(entities_file, {ent_type: embeddings}, data_sequence, split_parts, processes,
n_trees, distance_measure, output_path, search_k, max_dist)):
# noinspection PyBroadException
try:
mean_len_rank += len(ranking)
ranking['true_title'] = entity_title
hits = ranking.loc[ranking.guessed_title == entity_title].copy()
if len(hits) > 0:
hits['success'] = True
result = hits
total_successes += 1
mean_rank += result['rank'].min()
else:
result = ranking.iloc[[0]].copy()
result['success'] = False
results.append(result)
if len(results) >= save_interval:
write_results()
data_sequence.\
set_description('Total processed: {:.3f}. Success rate: {:.3f}. Mean rank: {:.3f}. '
'Mean len rank: {:.3f}.'. format(total_processed, total_successes / total_processed,
mean_rank / (total_successes + 1e-15),
mean_len_rank / total_processed))
if total_processed > max_iter:
break
except:
print("Error: ", ranking, 'page_tile: ', entity_title)
# raise
write_results()
return result_path
@click.command()
@click.argument('all-entities-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('tagged_parquet', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding_type', type=click.Choice(['flair']), required=True, nargs=1)
@click.argument('ent_type', type=str, required=True, nargs=1)
@click.argument('output_path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--max-iter', type=float, default=np.inf)
@click.option('--processes', type=int, default=6)
@click.option('--w-size', type=int, default=10)
@click.option('--batch-size', type=int, default=100)
@click.option('--start-iteration', type=int, default=100)
def build_context_matrix(all_entities_file, tagged_parquet, embedding_type, ent_type, output_path,
processes=6, save_interval=100000, max_iter=np.inf, w_size=10, batch_size=100,
start_iteration=0):
embeddings = load_embeddings(embedding_type)
print("Reading entity linking ground-truth file: {}.".format(tagged_parquet))
df = dd.read_parquet(tagged_parquet)
print("done.")
data_sequence = tqdm(df.iterrows(), total=len(df))
result_file = '{}/context-embeddings-embt_{}-entt_{}-wsize_{}.pkl'.\
format(output_path, embedding_type, ent_type, w_size)
all_entities = pd.read_pickle(all_entities_file)
all_entities = all_entities.loc[all_entities.TYPE == ent_type]
all_entities = all_entities.reset_index().reset_index().set_index('page_title').sort_index()
context_emb = None # lazy creation
for it, link_result in \
enumerate(
EmbedWithContext.run(embeddings, data_sequence, ent_type, w_size, batch_size,
processes, start_iteration=start_iteration)):
try:
if context_emb is None:
dims = len(link_result.drop(['entity_title', 'count']).astype(np.float32).values)
context_emb = np.zeros([len(all_entities), dims + 1], dtype=np.float32)
if it % save_interval == 0:
print('Saving ...')
pd.DataFrame(context_emb, index=all_entities.index).to_pickle(result_file)
idx = all_entities.loc[link_result.entity_title]['index']
context_emb[idx, 1:] += link_result.drop(['entity_title', 'count']).astype(np.float32).values
context_emb[idx, 0] += float(link_result['count'])
data_sequence.set_description('#entity links processed: {}'.format(it))
except:
print("Error: ", link_result)
raise
if it >= max_iter:
break
pd.DataFrame(context_emb, index=all_entities.index).to_pickle(result_file)
return result_file
@click.command()
@click.argument('context-matrix-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('distance-measure', type=click.Choice(['angular', 'euclidean']), required=True, nargs=1)
def build_from_context_matrix(context_matrix_file, n_trees, distance_measure):
build_from_matrix(context_matrix_file, distance_measure, n_trees)
def links_per_entity(context_matrix_file):
df = pd.read_pickle(context_matrix_file)
# Approximate number of links per entity:
return (df.iloc[:, 0]/df.index.str.split('_').str.len().values).sort_values(ascending=False)
@click.command()
@click.argument('index-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('mapping-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('tagged_parquet', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('embedding_type', type=click.Choice(['flair']), required=True, nargs=1)
@click.argument('ent_type', type=str, required=True, nargs=1)
@click.argument('distance-measure', type=click.Choice(['angular', 'euclidean']), required=True, nargs=1)
@click.argument('output_path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--max-iter', type=float, default=np.inf)
@click.option('--processes', type=int, default=6)
@click.option('--w-size', type=int, default=10)
@click.option('--batch-size', type=int, default=100)
@click.option('--start-iteration', type=int, default=100)
@click.option('--search-k', type=int, default=500)
def evaluate_with_context(index_file, mapping_file, tagged_parquet, embedding_type, ent_type, distance_measure,
output_path, processes=6, save_interval=10000, max_iter=np.inf, w_size=10, batch_size=100,
start_iteration=0, search_k=10):
embeddings = load_embeddings(embedding_type)
print("Reading entity linking ground-truth file: {}.".format(tagged_parquet))
df = dd.read_parquet(tagged_parquet)
print("done.")
data_sequence = tqdm(df.iterrows(), total=len(df))
result_path = '{}/nedstat-index_{}-sk_{}.parquet'.format(output_path, Path(index_file).stem, search_k)
print("Write result statistics to: {} .".format(result_path))
results = []
def write_results():
nonlocal results
if len(results) == 0:
return
res = pd.concat(results)
# noinspection PyArgumentList
table = pa.Table.from_pandas(res)
pq.write_to_dataset(table, root_path=result_path)
results = []
total_successes = mean_rank = 0
# The evaluation Semaphore makes sure that the LookUpBySurfaceAndContext task creation will not run away from
# the actual processing of those tasks. If that would happen it would result in ever increasing memory consumption.
evaluation_semaphore = Semaphore(batch_size)
for total_processed, (entity_title, result) in \
enumerate(
LookUpBySurfaceAndContext.run(index_file, mapping_file, distance_measure, search_k, embeddings,
data_sequence, start_iteration, ent_type, w_size, batch_size, processes,
evaluation_semaphore)):
try:
result['true_title'] = entity_title
# noinspection PyUnresolvedReferences
if (result.guessed_title == entity_title).sum() > 0:
result['success'] = True
total_successes += 1
mean_rank += result['rank'].iloc[0]
else:
result['success'] = False
data_sequence. \
set_description('Total processed: {:.3f}. Success rate: {:.3f} Mean rank: {:.3f}'.
format(total_processed, total_successes / (total_processed+1e-15),
mean_rank / (total_successes + 1e-15)))
evaluation_semaphore.release()
results.append(result)
if len(results) >= save_interval:
write_results()
if total_processed >= max_iter:
break
except:
print("Error: ", result)
raise
write_results()
class RefineLookup:
cm = None
def __init__(self, entity_title, ranking, link_embedding):
self._entity_title = entity_title
self._ranking = ranking
self._link_embedding = link_embedding
def __call__(self, *args, **kwargs):
if len(self._ranking) == 1:
return self._entity_title, self._ranking
e = self._link_embedding.drop(['entity_title', 'count']).astype(np.float32).values
e /= float(self._link_embedding['count'])
e /= norm(e)
# noinspection PyBroadException
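        # Re-rank the candidates by squared Euclidean distance between the mention's normalized
        # context embedding and each candidate's stored context embedding (rows of RefineLookup.cm).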
try:
order = np.argsort(np.square(
RefineLookup.cm.loc[self._ranking.guessed_title].values - repmat(e, len(self._ranking), 1)
).sum(axis=1))
except:
import ipdb;ipdb.set_trace()
raise
self._ranking = \
self._ranking.iloc[order].\
reset_index(drop=True).\
drop(columns=['rank']).\
reset_index().\
rename(columns={'index': 'rank'})
return self._entity_title, self._ranking
@staticmethod
def _get_all(entities_file, data_sequence_1, data_sequence_2, embeddings_1, ent_type_1, split_parts, n_trees, distance_measure_1,
output_path,
search_k_1, max_dist, lookup_semaphore,
embeddings_2, ent_type_2, w_size, batch_size, embed_semaphore, processes):
for total_processed, ((entity_title, ranking), link_embedding) in \
enumerate(zip(
LookUpBySurface.run(entities_file, {ent_type_1: embeddings_1}, data_sequence_1, split_parts,
processes, n_trees, distance_measure_1, output_path, search_k_1, max_dist,
sem=lookup_semaphore),
EmbedWithContext.run(embeddings_2, data_sequence_2, ent_type_2, w_size, batch_size,
processes, sem=embed_semaphore))):
yield RefineLookup(entity_title, ranking, link_embedding)
@staticmethod
def run(entities_file, context_matrix_file, data_sequence_1, data_sequence_2, embeddings_1, ent_type_1, split_parts,
n_trees, distance_measure_1, output_path, search_k_1, max_dist, lookup_semaphore,
embeddings_2, ent_type_2, w_size, batch_size, embed_semaphore, processes,
refine_processes=0):
return \
prun(RefineLookup._get_all(entities_file, data_sequence_1, data_sequence_2, embeddings_1, ent_type_1,
split_parts, n_trees, distance_measure_1, output_path, search_k_1, max_dist,
lookup_semaphore,
embeddings_2, ent_type_2, w_size, batch_size, embed_semaphore, processes),
initializer=RefineLookup.initialize, initargs=(context_matrix_file,), processes=refine_processes)
@staticmethod
def initialize(context_matrix_file):
cm = pd.read_pickle(context_matrix_file)
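        # Column 0 of the context matrix holds the per-entity link count; the remaining columns
        # hold the summed context embeddings, which are normalized to unit length below.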
for idx in tqdm(range(len(cm))):
if cm.iloc[idx, 0] == 0:
continue
cm.iloc[idx, 1:] = cm.iloc[idx, 1:] / norm(cm.iloc[idx, 1:])
cm = cm.iloc[:, 1:]
RefineLookup.cm = cm
@click.command()
@click.argument('entities-file', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('tagged-parquet', type=click.Path(exists=True), required=True, nargs=1)
@click.argument('ent-type', type=str, required=True, nargs=1)
@click.argument('embedding-type-1', type=click.Choice(['fasttext']), required=True, nargs=1)
@click.argument('n-trees', type=int, required=True, nargs=1)
@click.argument('distance-measure-1', type=click.Choice(['angular', 'euclidean']), required=True, nargs=1)
@click.argument('embedding-type-2', type=click.Choice(['flair']), required=True, nargs=1)
@click.argument('w-size', type=int, required=True, nargs=1)
@click.argument('batch-size', type=int, required=True, nargs=1)
@click.argument('output-path', type=click.Path(exists=True), required=True, nargs=1)
@click.option('--search-k-1', type=int, default=50)
@click.option('--max-dist', type=float, default=0.25)
@click.option('--processes', type=int, default=6)
@click.option('--save-interval', type=int, default=10000)
@click.option('--max-iter', type=float, default=np.inf)
def evaluate_combined(entities_file, tagged_parquet, ent_type,
embedding_type_1, n_trees, distance_measure_1,
embedding_type_2, w_size, batch_size,
output_path,
search_k_1=50, max_dist=0.25, processes=6, save_interval=10000,
max_iter=np.inf, split_parts=True):
embeddings_1 = load_embeddings(embedding_type_1)
embeddings_2 = load_embeddings(embedding_type_2)
print("Reading entity linking ground-truth file: {}".format(tagged_parquet))
df = dd.read_parquet(tagged_parquet)
print("done.")
data_sequence_1 = tqdm(df.iterrows(), total=len(df))
data_sequence_2 = df.iterrows()
result_path = '{}/nedstat-embt1_{}-embt2_{}-entt_{}-nt_{}-dm1_{}-sk_{}-md_{}-wsize_{}.parquet'.\
format(output_path, embedding_type_1, embedding_type_2, ent_type, n_trees, distance_measure_1,
search_k_1, max_dist, w_size)
print("Write result statistics to: {} .".format(result_path))
total_successes = mean_rank = mean_len_rank = 0
results = []
context_matrix_file = '{}/context-embeddings-embt_{}-entt_{}-wsize_{}.pkl'.\
format(output_path, embedding_type_2, ent_type, w_size)
def write_results():
nonlocal results
if len(results) == 0:
return
res = pd.concat(results, sort=True)
# noinspection PyArgumentList
table = pa.Table.from_pandas(res)
pq.write_to_dataset(table, root_path=result_path)
results = []
# The Semaphores make sure that the task creation will not run away from each other.
# If that would happen it would result in ever increasing memory consumption.
lookup_semaphore = Semaphore(batch_size*2)
embed_semaphore = Semaphore(batch_size*2)
for total_processed, (entity_title, ranking) in \
enumerate(RefineLookup.run(entities_file, context_matrix_file, data_sequence_1, data_sequence_2, embeddings_1,
ent_type, split_parts, n_trees, distance_measure_1, output_path, search_k_1,
max_dist, lookup_semaphore, embeddings_2, ent_type, w_size, batch_size, embed_semaphore,
processes)):
# noinspection PyBroadException
try:
mean_len_rank += len(ranking)
ranking['true_title'] = entity_title
hits = ranking.loc[ranking.guessed_title == entity_title].copy()
if len(hits) > 0:
hits['success'] = True
result = hits
total_successes += 1
mean_rank += result['rank'].min()
else:
result = ranking.iloc[[0]].copy()
result['success'] = False
results.append(result)
if len(results) >= save_interval:
write_results()
data_sequence_1.\
set_description('Total processed: {:.3f}. Success rate: {:.3f}. Mean rank: {:.3f}. '
'Mean len rank: {:.3f}.'. format(total_processed, total_successes /
(total_processed + 1e-15),
mean_rank / (total_successes + 1e-15),
mean_len_rank / (total_processed + 1e-15)))
if total_processed > max_iter:
break
except:
print("Error: ", ranking, 'page_tile: ', entity_title)
raise
lookup_semaphore.release()
if total_processed % batch_size == 0:
embed_semaphore.release()
write_results()
return result_path
class NEDDataTask:
def __init__(self, page_id, text, tags, link_titles, page_title, ent_types=None):
self._page_id = page_id
self._text = text
self._tags = tags
self._link_titles = link_titles
self._page_title = page_title
        if ent_types is None:
            ent_types = {'PER', 'LOC', 'ORG'}
        self._ent_types = ent_types
def __call__(self, *args, **kwargs):
sentences = json.loads(self._text)
link_titles = json.loads(self._link_titles)
tags = json.loads(self._tags)
df_sentence = []
df_linking = []
for sen, sen_link_titles, sen_tags in zip(sentences, link_titles, tags):
if len(self._ent_types.intersection({t if len(t) < 3 else t[2:] for t in sen_tags})) == 0:
# Do not further process sentences that do not contain a relevant linked entity of type "ent_types".
continue
tmp1 = {'id': [len(df_sentence)],
'text': [json.dumps(sen)],
'tags': [json.dumps(sen_tags)],
'entities': [json.dumps(sen_link_titles)],
'page_id': [self._page_id],
'page_title': [self._page_title]}
sen_link_titles = [t for t in list(set(sen_link_titles)) if len(t) > 0]
tmp2 = {'target': sen_link_titles, 'sentence': len(sen_link_titles) * [len(df_sentence)]}
df_sentence.append(pd.DataFrame.from_dict(tmp1).reset_index(drop=True))
df_linking.append(pd.DataFrame.from_dict(tmp2).reset_index(drop=True))
if len(df_sentence) > 0:
return | pd.concat(df_sentence) | pandas.concat |
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.integrate
import growth.model
import growth.viz
colors, palette = growth.viz.matplotlib_style()
const = growth.model.load_constants()
# Set the constants
gamma_max = const['gamma_max']
nu_max = 4.5
Kd_cpc = const['Kd_cpc']
Kd_cnt = const['Kd_cnt']
phi_O = const['phi_O']
Y = const['Y']
c_nt = 100
phiRb = growth.model.phiRb_optimal_allocation(gamma_max, nu_max, Kd_cpc, phi_O)
cpc = growth.model.steady_state_precursors(gamma_max, phiRb, nu_max, Kd_cpc, phi_O)
# set the time range
dt = 0.001
time_range = np.arange(0, 4 + dt, dt)
perturb = [0.5, 2]
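# The perturbations halve and double the steady-state precursor concentration (params[3] = p * cpc below).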
# Compute the pre-perturbation
params = [1, phiRb, 1 - phi_O - phiRb, cpc, c_nt]
args = (gamma_max, nu_max, Y, phiRb, 1 - phi_O - phiRb, Kd_cpc, Kd_cnt)
out = scipy.integrate.odeint(growth.model.self_replicator, params, time_range, args)
pre_df = pd.DataFrame(out, columns=['M', 'M_Rb', 'M_Mb', 'c_pc', 'c_nt'])
pre_df['perturbation'] = perturb[0]
pre_df['dilution'] = True
pre_df['time'] = time_range
# Make the perturbation
params = list(out[-1])
# params[3] *= perturb
dfs = []
for i, p in enumerate(perturb):
params[3] = p * cpc
# Integration with dilution
args = (gamma_max, nu_max, Y, phiRb, 1 - phi_O - phiRb, Kd_cpc, Kd_cnt, False)
out_dil = scipy.integrate.odeint(growth.model.self_replicator, params, time_range, args)
dil_df = pd.DataFrame(out_dil, columns=['M', 'M_Rb', 'M_Mb', 'c_pc', 'c_nt'])
dil_df['dilution'] = True
dil_df['time'] = time_range + time_range.max() - dt
dil_df['perturb'] = p
# Integration without dilution
args = (gamma_max, nu_max, Y, phiRb, 1 - phi_O - phiRb, Kd_cpc, Kd_cnt, True)
out_nodil = scipy.integrate.odeint(growth.model.self_replicator, params, time_range, args)
nodil_df = pd.DataFrame(out_nodil, columns=['M', 'M_Rb', 'M_Mb', 'c_pc', 'c_nt'])
nodil_df['dilution'] = False
nodil_df['time'] = time_range + time_range.max() - dt
nodil_df['perturb'] = p
dfs.append(dil_df)
dfs.append(nodil_df)
df = | pd.concat(dfs, sort=False) | pandas.concat |
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Assignment 2 - Pandas Introduction
# All questions are weighted the same in this assignment.
# ## Part 1
# The following code loads the olympics dataset (olympics.csv), which was derived from the Wikipedia entry on [All Time Olympic Games Medals](https://en.wikipedia.org/wiki/All-time_Olympic_Games_medal_table), and does some basic data cleaning.
#
# The columns are organized as # of Summer games, Summer medals, # of Winter games, Winter medals, total # of games, total # of medals. Use this dataset to answer the questions below.
# In[10]:
import pandas as pd
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold'+col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver'+col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#'+col[1:]}, inplace=True)
names_ids = df.index.str.split('\s\(') # split the index by '('
df.index = names_ids.str[0] # the [0] element is the country name (new index)
df['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)
df = df.drop('Totals')
df.head()
# ### Question 0 (Example)
#
# What is the first country in df?
#
# *This function should return a Series.*
# In[11]:
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_one():
# This function returns the row for Afghanistan, which is a Series object. The assignment
# question description will tell you the general format the autograder is expecting
return df.iloc[0]
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_one()
# ### Question 1
# Which country has won the most gold medals in summer games?
#
# *This function should return a single string value.*
# In[88]:
def answer_one():
    Most_GM=df.loc[df['Gold'].idxmax()].name
return Most_GM
answer_one()
# ### Question 2
# Which country had the biggest difference between their summer and winter gold medal counts?
#
# *This function should return a single string value.*
# In[89]:
def answer_two():
    return df.loc[(df['Gold']-df['Gold.1']).idxmax()].name
answer_two()
# ### Question 3
# Which country has the biggest difference between their summer gold medal counts and winter gold medal counts relative to their total gold medal count?
#
# $$\frac{Summer~Gold - Winter~Gold}{Total~Gold}$$
#
# Only include countries that have won at least 1 gold in both summer and winter.
#
# *This function should return a single string value.*
# In[142]:
def answer_three():
    rG=df.loc[(df['Gold']>=1)&(df['Gold.1']>=1)].copy()
    rG['rgn']=(rG['Gold']-rG['Gold.1'])/(rG['Gold.2'])
    rgn_1=rG.sort_values(['rgn'],ascending=False).head(1)
rgn=rgn_1.index[0]
return rgn
answer_three()
# ### Question 4
# Write a function that creates a Series called "Points" which is a weighted value where each gold medal (`Gold.2`) counts for 3 points, silver medals (`Silver.2`) for 2 points, and bronze medals (`Bronze.2`) for 1 point. The function should return only the column (a Series object) which you created.
#
# *This function should return a Series named `Points` of length 146*
# In[43]:
def answer_four():
Points=pd.Series((df['Gold.2']*3)+(df['Silver.2']*2+(df['Bronze.2']*1)))
return Points
answer_four()
# ## Part 2
# For the next set of questions, we will be using census data from the [United States Census Bureau](http://www.census.gov/popest/data/counties/totals/2015/CO-EST2015-alldata.html). Counties are political and geographic subdivisions of states in the United States. This dataset contains population data for counties and states in the US from 2010 to 2015. [See this document](http://www.census.gov/popest/data/counties/totals/2015/files/CO-EST2015-alldata.pdf) for a description of the variable names.
#
# The census dataset (census.csv) should be loaded as census_df. Answer questions using this as appropriate.
#
# ### Question 5
# Which state has the most counties in it? (hint: consider the sumlevel key carefully! You'll need this for future questions too...)
#
# *This function should return a single string value.*
# In[1]:
import pandas as pd
import numpy as np
census_df = pd.read_csv('census.csv')
census_df.head()
# In[2]:
def answer_five():
us_states=dict()
county=census_df['CTYNAME'].tolist()
#print('total number of county is',len(county) )
state=dict()
count=0
dic_c=0
ls=None
st=None
for r,cod,st in zip(census_df.CTYNAME,census_df.COUNTY,census_df.STATE):
count=count+1
for j in census_df.loc[(census_df['CTYNAME']==r)&(census_df['COUNTY']==cod)&
(census_df['STATE']==st)]['STNAME']:
us_states[j]=us_states.get(j,0)+1
dic_c=dic_c+1
for state,count in us_states.items():
if ls is None or count>ls:
st=state
ls=count
return st
answer_five()
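# A more direct sketch of the same query (assuming SUMLEV == 50 marks the county-level rows):
# census_df[census_df['SUMLEV'] == 50].groupby('STNAME')['CTYNAME'].count().idxmax()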
# ### Question 6
# Only looking at the three most populous counties for each state, what are the three most populous states (in order of highest population to lowest population)? Use `CENSUS2010POP`.
#
# *This function should return a list of string values.*
# In[3]:
def answer_six():
for i in census_df['STNAME'].unique():
simp=census_df[(census_df.STNAME==i)&(census_df.SUMLEV==50)].sort_values(['CENSUS2010POP']
,ascending=False).head(3)
try:
ls= | pd.concat([ls,simp],axis=0) | pandas.concat |
"""
Author: <NAME>
Created: 14/08/2020 11:04 AM
"""
import os
import numpy as np
import pandas as pd
from basgra_python import run_basgra_nz, _trans_manual_harv, get_month_day_to_nonleap_doy
from input_output_keys import matrix_weather_keys_pet
from check_basgra_python.support_for_tests import establish_org_input, get_org_correct_values, get_lincoln_broadfield, \
test_dir, establish_peyman_input, _clean_harvest, base_auto_harvest_data, base_manual_harvest_data
from supporting_functions.plotting import plot_multiple_results # used in test development and debugging
verbose = False
drop_keys = [ # newly added keys that must be dropped initially to manage tests, datasets are subsequently re-created
'WAFC',
'IRR_TARG',
'IRR_TRIG',
'IRRIG_DEM',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP',
'MXPAW',
'PAW',
'RESEEDED',
]
view_keys = [
'WAL',
'WCL',
'DM',
'YIELD',
'BASAL',
'ROOTD',
'IRRIG_DEM',
'HARVFR',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP', # # mm # Water in non-frozen root zone at wilting point
'MXPAW', # mm # maximum Profile available water
'PAW', # mm Profile available water at the time step
]
def test_trans_manual_harv(update_data=False):
test_nm = 'test_trans_manual_harv'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
np.random.seed(1)
days_harvest.loc[:, 'harv_trig'] = np.random.rand(len(days_harvest))
np.random.seed(2)
days_harvest.loc[:, 'harv_targ'] = np.random.rand(len(days_harvest))
np.random.seed(3)
days_harvest.loc[:, 'weed_dm_frac'] = np.random.rand(len(days_harvest))
out = _trans_manual_harv(days_harvest, matrix_weather)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out, dropable=False)
def _output_checks(out, correct_out, dropable=True):
"""
base checker
:param out: basgra data from current test
:param correct_out: expected basgra data
    :param dropable: boolean, if True, output keys may be dropped; this allows _output_checks to be used for
                     non-BASGRA data and for new outputs to be dropped when comparing results.
:return:
"""
if dropable:
# should normally be empty, but is here to allow easy checking of old tests against versions with a new output
drop_keys_int = [
]
out2 = out.drop(columns=drop_keys_int)
else:
out2 = out.copy(True)
# check shapes
assert out2.shape == correct_out.shape, 'something is wrong with the output shapes'
# check datatypes
    assert issubclass(out.values.dtype.type, np.floating), 'outputs of the model should all be floats'
out2 = out2.values
correct_out2 = correct_out.values
out2[np.isnan(out2)] = -9999.99999
correct_out2[np.isnan(correct_out2)] = -9999.99999
# check values match for sample run
isclose = np.isclose(out2, correct_out2)
asmess = '{} values do not match between the output and correct output with rtol=1e-05, atol=1e-08'.format(
(~isclose).sum())
assert isclose.all(), asmess
print(' model passed test\n')
def test_org_basgra_nz(update_data=False):
print('testing original basgra_nz')
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
# test against my saved version (simply to have all columns
data_path = os.path.join(test_dir, 'test_org_basgra.csv')
if update_data:
out.to_csv(data_path)
print(' testing against full dataset')
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# test to the original data provided by <NAME>ward
out.drop(columns=drop_keys, inplace=True) # remove all of the newly added keys
    print(" testing against Simon Woodward's original data")
correct_out2 = get_org_correct_values()
_output_checks(out, correct_out2)
def test_irrigation_trigger(update_data=False):
print('testing irrigation trigger')
# note this is linked to test_leap, so any inputs changes there should be mapped here
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_trigger_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irrigation_fraction(update_data=False):
print('testing irrigation fraction')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .60 # irrigation of 60% of what is needed to get to field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_fraction_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_water_short(update_data=False):
print('testing water shortage')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.8
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_water_short_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_short_season(update_data=False):
print('testing short season')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_short_season_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_variable_irr_trig_targ(update_data=False):
print('testing time variable irrigation triggers and targets')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[matrix_weather.index > '2013-08-01', 'irr_trig'] = 0.7
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather.loc[(matrix_weather.index < '2012-08-01'), 'irr_targ'] = 0.8
matrix_weather.loc[(matrix_weather.index > '2015-08-01'), 'irr_targ'] = 0.8
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_variable_irr_trig_targ.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irr_paw(update_data=False):
test_nm = 'test_irr_paw'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 0.9
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
params['irr_frm_paw'] = 1
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_pet_calculation(update_data=False):
    # note this test was not as thoroughly investigated as it was not needed for my work stream
print('testing pet calculation')
params, matrix_weather, days_harvest, doy_irr = establish_peyman_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default',
supply_pet=False)
data_path = os.path.join(test_dir, 'test_pet_calculation.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# Manual Harvest tests
def test_fixed_harvest_man(update_data=False):
test_nm = 'test_fixed_harvest_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 10
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2000
days_harvest.loc[idx, 'harv_targ'] = 100
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_harv_trig_man(update_data=False):
    # test manual harvesting dates with a set trigger, weed fraction set to zero
test_nm = 'test_harv_trig_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_man(update_data=False):
# test manual harvesting trig set to zero +- target with weed fraction above 0
test_nm = 'test_weed_fraction_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.5
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 1
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# automatic harvesting tests
def test_auto_harv_trig(update_data=False):
test_nm = 'test_auto_harv_trig'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_auto_harv_fixed(update_data=False):
test_nm = 'test_auto_harv_fixed'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = base_auto_harvest_data(matrix_weather)
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_auto(update_data=False):
# test auto harvesting trig set +- target with weed fraction above 0
test_nm = 'test_weed_fraction_auto'
print('testing: ' + test_nm)
    # auto harvesting dates with a set trigger; weed fractions are above zero here
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 1.25
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.75
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = | pd.read_csv(data_path, index_col=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
(1811, 'Alkmaar'),
(7471, 'Goor'),
(7556, 'Hengelo'),
(7903, 'Hoogeveen'),
(7942, 'Meppel'),
(8011, 'Zwolle'),
(8232, 'Lelystad'),
(8442, 'Heerenveen'),
(8911, 'Leeuwarden'),
(9291, 'Kollum'),
(9501, 'Stadskanaal'),
(9726, 'Groningen'),
# Regio Midden
(2406, '<NAME>/<NAME>'),
(2515, '<NAME>'),
(3013, 'Rotterdam'),
(3511, 'Utrecht'),
(3901, 'Veenendaal'),
((7137, 7131), 'Lichtenvoorde|Groenlo'),
(7311, 'Apeldoorn'),
# Regio Zuid
(4325, 'Renesse'),
(4462, 'Goes'),
(4701, 'Roosendaal'),
(5038, 'Tilburg'),
(5401, 'Uden'),
(5611, 'Eindhoven'),
(5801, 'Oostrum'),
(6101, 'Echt'),
(6229, 'Maastricht'),
(6541, 'Nijmegen'),
])
def get_bad_scan_times():
"""Return list of Timestamps with bad scan times, from CSV data."""
df = pd.read_csv('data-ggd/ggd_bad_scans.txt', comment='#')
tstamps = | pd.to_datetime(df['Timestamp']) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
import os
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
import numpy as np
import os
import pandas as pd
import argparse
import glob
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.special import softmax
TEMPERATURE = 1.548991 # optimized temperature for calibration of Catnet
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
type=str,
help='path to folder containing test results in csv'
)
args = parser.parse_args()
plt.rcParams['font.size'] = 16
cmap = plt.cm.get_cmap('tab20')  # discrete colormap for the 11 surgical phases
csv_files = glob.glob(os.path.join(args.input, '*.csv'))
# ensemble predictions for rsd and experience over folds
all_df = []
for file in csv_files:
vname = os.path.splitext(os.path.basename(file))[0]
df = pd.read_csv(file)
df['gt_rsd'] = np.max(df['elapsed']) - df['elapsed']
df['video'] = vname
all_df.append(df)
fig = plt.figure(figsize=[10, 5])
gs = fig.add_gridspec(3, 3, height_ratios=[2.2, 1, 1], hspace=0.0, width_ratios=[4,1,1])
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[2, 0])
ax4 = fig.add_subplot(gs[:, 1])
ax5 = fig.add_subplot(gs[:, 2])
# legend for steps
ax4.imshow(np.arange(11)[:, None], extent=[0, 1, 0, 11], cmap=cmap, interpolation='none', vmin=0, vmax=11)
ax4.set_xticks([])
ax4.yaxis.tick_right()
phases = ['None','Inc', 'VisAgInj', 'Rhexis', 'Hydro', 'Phaco', 'IrAsp', 'CapsPol', 'LensImpl', 'VisAgRem', 'TonAnti']
phases.reverse()
ax4.set_yticks(ticks=np.arange(11)+0.5)
ax4.set_yticklabels(phases)
# create a second axes for the colorbar
ax5.imshow(np.linspace(0,7,100)[:, None], extent=[0, 1, 0, 9], cmap='cividis', interpolation='none', vmin=0, vmax=7)
ax5.set_xticks([])
ax5.yaxis.tick_right()
ax5.set_yticks([0,9])
ax5.set_yticklabels(['senior', 'assistant'])
#plt.subplots(3, 1, gridspec_kw={'height_ratios': [2.2, 1, 1]})
ax1.plot(df['elapsed'], df['gt_rsd'], linewidth=2)
ax1.plot(df['elapsed'], df['predicted_rsd'], linewidth=2)
ax1.set_ylim(bottom=0.0)
ax1.set_xlim(left=np.min(df['elapsed']), right=np.max(df['elapsed']))
ax1.legend(['ground-truth', 'predicted'])
ax1.set_xticks([])
# perform temperature scaling of experience predictions
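# Dividing the logits by TEMPERATURE > 1 softens the softmax output, which calibrates the predicted probabilities.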
experience_cal = softmax(np.column_stack([df['predicted_senior'], df['predicted_assistant']])/TEMPERATURE, axis=-1)[:, 0]
height = np.max(df['elapsed'])/15
ax2.imshow(df['predicted_step'][None, :].astype(int), cmap=cmap,
extent=[0.0, np.max(df['elapsed']), -height, height], interpolation='none', vmin=0, vmax=11)
ax3.imshow(experience_cal[None, :],cmap='cividis',
extent=[0.0, np.max(df['elapsed']), -height, height], interpolation='none', vmin=0, vmax=1)
ax2.set_xticks([])
ax2.set_ylabel('phase\n')
ax3.set_ylabel('exp.\n')
ax1.set_ylabel('RSD (min)')
ax3.set_yticks([])
ax2.set_yticks([])
ax3.set_xlabel('elapsed time (min)')
plt.savefig(os.path.join(args.input, vname + '.png'))
plt.close()
all_df = | pd.concat(all_df) | pandas.concat |
from abc import abstractmethod
from hashlib import new
from analizer.abstract.expression import Expression
from analizer.abstract import expression
from enum import Enum
import sys
sys.path.append("../../..")
from storage.storageManager import jsonMode
from analizer.typechecker.Metadata import Struct
from analizer.typechecker import Checker
import pandas as pd
from analizer.symbol.symbol import Symbol
from analizer.symbol.environment import Environment
from analizer.reports import Nodo
from analizer.reports import AST
import analizer
from prettytable import PrettyTable
from analizer.abstract.expression import Primitive, TYPE
ast = AST.AST()
root = None
envVariables = []
class SELECT_MODE(Enum):
ALL = 1
PARAMS = 2
# load the data
Struct.load()
# variable that stores the database currently in use
dbtemp = ""
# lists that store the syntax and semantic errors
syntaxPostgreSQL = list()
semanticErrors = list()
syntaxErrors = list()
def makeAst(root):
try:
ast.makeAst(root)
except:
pass
class Instruction:
"""
    This class represents an instruction
"""
def __init__(self, row, column) -> None:
self.row = row
self.column = column
@abstractmethod
def execute(self, environment):
"""
        Method used to execute the expressions
"""
@abstractmethod
def c3d(self, environment):
"""
        Method used to execute the expressions
"""
class SelectParams(Instruction):
def __init__(self, params, row, column):
Instruction.__init__(self, row, column)
self.params = params
def execute(self, environment):
pass
# done
class Select(Instruction):
def __init__(
self,
params,
fromcl,
wherecl,
groupbyCl,
havingCl,
limitCl,
distinct,
orderbyCl,
row,
column,
):
Instruction.__init__(self, row, column)
self.params = params
self.wherecl = wherecl
self.fromcl = fromcl
self.groupbyCl = groupbyCl
self.havingCl = havingCl
self.limitCl = limitCl
self.distinct = distinct
self.orderbyCl = orderbyCl
def execute(self, environment):
try:
newEnv = Environment(environment, dbtemp)
global envVariables
envVariables.append(newEnv)
self.fromcl.execute(newEnv)
if self.wherecl != None:
self.wherecl.execute(newEnv)
if self.groupbyCl != None:
newEnv.groupCols = len(self.groupbyCl)
groupDf = None
groupEmpty = True
if self.params:
params = []
for p in self.params:
if isinstance(p, expression.TableAll):
result = p.execute(newEnv)
for r in result:
params.append(r)
else:
params.append(p)
labels = [p.temp for p in params]
if self.groupbyCl != None:
value = []
for i in range(len(params)):
ex = params[i].execute(newEnv)
val = ex.value
newEnv.types[labels[i]] = ex.type
                        # If this is not a grouping column
if i < len(self.groupbyCl):
if not (
isinstance(val, pd.core.series.Series)
or isinstance(val, pd.DataFrame)
):
nval = {
val: [
val for i in range(len(newEnv.dataFrame.index))
]
}
nval = pd.DataFrame(nval)
val = nval
newEnv.dataFrame = pd.concat(
[newEnv.dataFrame, val], axis=1
)
else:
if groupEmpty:
countGr = newEnv.groupCols
                                # Get the most recently added columns (the GROUP BY ones)
df = newEnv.dataFrame.iloc[:, -countGr:]
cols = list(df.columns)
groupDf = df.groupby(cols).sum().reset_index()
groupDf = pd.concat([groupDf, val], axis=1)
groupEmpty = False
else:
groupDf = pd.concat([groupDf, val], axis=1)
if groupEmpty:
countGr = newEnv.groupCols
                    # Get the most recently added columns (the GROUP BY ones)
df = newEnv.dataFrame.iloc[:, -countGr:]
cols = list(df.columns)
groupDf = df.groupby(cols).sum().reset_index()
groupEmpty = False
else:
value = [p.execute(newEnv) for p in params]
for j in range(len(labels)):
newEnv.types[labels[j]] = value[j].type
newEnv.dataFrame[labels[j]] = value[j].value
else:
value = [newEnv.dataFrame[p] for p in newEnv.dataFrame]
labels = [p for p in newEnv.dataFrame]
if self.orderbyCl != None:
order_build = []
kind_order = True
Nan_pos = None
for order_item in self.orderbyCl:
                    #BUILD THE LIST OF COLUMNS TO SORT BY
result = order_item[0].execute(newEnv)
order_build.append(result.value.name)
                    #SORT DIRECTION
if order_item[1].lower() == 'asc':
kind_order = True
else:
kind_order = False
                    # NULL PLACEMENT
if order_item[2] == None:
pass
elif order_item[2].lower() == 'first':
Nan_pos = 'first'
elif order_item[2].lower() == 'last':
Nan_pos = 'last'
if Nan_pos != None:
newEnv.dataFrame = newEnv.dataFrame.sort_values(by = order_build, ascending = kind_order, na_position = Nan_pos)
else:
newEnv.dataFrame = newEnv.dataFrame.sort_values(by = order_build, ascending = kind_order)
if value != []:
value = [newEnv.dataFrame[p] for p in newEnv.dataFrame]
labels = [p for p in newEnv.dataFrame]
if value != []:
if self.wherecl == None:
df_ = newEnv.dataFrame.filter(labels)
if self.limitCl:
df_ = self.limitCl.execute(df_, newEnv)
if self.distinct:
return [df_.drop_duplicates(), newEnv.types]
return [df_, newEnv.types]
w2 = newEnv.dataFrame.filter(labels)
            # If the WHERE clause returns an empty dataframe
if w2.empty:
return None
df_ = w2
if self.limitCl:
df_ = self.limitCl.execute(df_, newEnv)
if self.distinct:
return [df_.drop_duplicates(), newEnv.types]
return [df_, newEnv.types]
else:
newNames = {}
i = 0
for (columnName, columnData) in groupDf.iteritems():
newNames[columnName] = labels[i]
i += 1
groupDf.rename(columns=newNames, inplace=True)
df_ = groupDf
if self.limitCl:
df_ = self.limitCl.execute(df_, newEnv)
if self.distinct:
return [df_.drop_duplicates(), newEnv.types]
return [df_, newEnv.types]
except:
print("Error: P0001: Error en la instruccion SELECT")
def dot(self):
new = Nodo.Nodo("SELECT")
paramNode = Nodo.Nodo("PARAMS")
new.addNode(paramNode)
if self.distinct:
dis = Nodo.Nodo("DISTINCT")
new.addNode(dis)
if len(self.params) == 0:
asterisco = Nodo.Nodo("*")
paramNode.addNode(asterisco)
else:
for p in self.params:
paramNode.addNode(p.dot())
new.addNode(self.fromcl.dot())
if self.wherecl != None:
new.addNode(self.wherecl.dot())
if self.groupbyCl != None:
gb = Nodo.Nodo("GROUP_BY")
new.addNode(gb)
for g in self.groupbyCl:
gb.addNode(g.dot())
if self.havingCl != None:
hv = Nodo.Nodo("HAVING")
new.addNode(hv)
hv.addNode(self.havingCl.dot())
if self.orderbyCl != None:
ob = Nodo.Nodo("ORDER_BY")
new.addNode(ob)
for o in self.orderbyCl:
ob.addNode(o[0].dot())
to = Nodo.Nodo(o[1])
ob.addNode(to)
coma = Nodo.Nodo(",")
ob.addNode(coma)
if o[2] != None:
on = Nodo.Nodo(o[2])
ob.addNode(on)
if self.limitCl != None:
new.addNode(self.limitCl.dot())
return new
def c3d(self, environment):
cont = environment.conta_exec
environment.codigo += "".join(environment.count_tabs) + "C3D.pila = "+str(cont)+"\n"
environment.codigo += "".join(environment.count_tabs) + "C3D.ejecutar() #Crear Select\n\n"
environment.conta_exec += 1
# done
class FromClause(Instruction):
"""
    Class in charge of the FROM clause, used to fetch the data
"""
def __init__(self, tables, aliases, row, column):
Instruction.__init__(self, row, column)
self.tables = tables
self.aliases = aliases
def crossJoin(self, tables):
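        # Emulate a CROSS JOIN: add a constant helper column to every table, merge the tables
        # pairwise on that column, then drop the helper column from the result.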
if len(tables) <= 1:
return tables[0]
for t in tables:
t["____tempCol"] = 1
new_df = tables[0]
i = 1
while i < len(tables):
new_df = pd.merge(new_df, tables[i], on=["____tempCol"])
i += 1
new_df = new_df.drop("____tempCol", axis=1)
return new_df
def execute(self, environment):
tempDf = None
for i in range(len(self.tables)):
exec = self.tables[i].execute(environment)
data = exec[0]
types = exec[1]
if isinstance(self.tables[i], Select):
newNames = {}
subqAlias = self.aliases[i]
for (columnName, columnData) in data.iteritems():
colSplit = columnName.split(".")
if len(colSplit) >= 2:
newNames[columnName] = subqAlias + "." + colSplit[1]
types[subqAlias + "." + colSplit[1]] = columnName
else:
newNames[columnName] = subqAlias + "." + colSplit[0]
types[subqAlias + "." + colSplit[0]] = columnName
data.rename(columns=newNames, inplace=True)
environment.addVar(subqAlias, subqAlias, "TABLE", self.row, self.column)
else:
sym = Symbol(
self.tables[i].name,
None,
self.tables[i].row,
self.tables[i].column,
None,
None
)
environment.addSymbol(self.tables[i].name, sym)
if self.aliases[i]:
environment.addSymbol(self.aliases[i], sym)
if i == 0:
tempDf = data
else:
tempDf = self.crossJoin([tempDf, data])
environment.dataFrame = tempDf
try:
environment.types.update(types)
except:
print(
"Error: P0001: Error en la instruccion SELECT clausula FROM"
)
return
def dot(self):
new = Nodo.Nodo("FROM")
for t in self.tables:
if isinstance(t, analizer.abstract.instruction.Select):
n = t.dot()
new.addNode(n)
else:
t1 = Nodo.Nodo(t.name)
new.addNode(t1)
for a in self.aliases:
a1 = Nodo.Nodo(a)
new.addNode(a1)
return new
def c3d(self, environment):
cont = environment.conta_exec
environment.codigo += "".join(environment.count_tabs) + "C3D.pila = "+str(cont)+"\n"
environment.codigo += "".join(environment.count_tabs) + "C3D.ejecutar() #Clausula From\n\n"
environment.conta_exec += 1
# done
class TableID(Expression):
"""
    This class represents an abstract object for handling tables
"""
type_ = None
def __init__(self, name, row, column):
Expression.__init__(self, row, column)
self.name = name
def execute(self, environment):
result = jsonMode.extractTable(dbtemp, self.name)
if result == None:
semanticErrors.append(
[
"La tabla "
+ str(self.name)
+ " no pertenece a la base de datos "
+ dbtemp,
self.row,
]
)
print(
"Error: 42P01: la relacion "
+ dbtemp
+ "."
+ str(self.name)
+ " no existe"
)
        # Store a list with the name and type of each column
lst = Struct.extractColumns(dbtemp, self.name)
columns = [l.name for l in lst]
newColumns = [self.name + "." + col for col in columns]
df = | pd.DataFrame(result, columns=newColumns) | pandas.DataFrame |
from pathlib import Path
from src import utils
from src.data import DataLoaders
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
import pickle
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, accuracy_score, confusion_matrix,mean_squared_error,mean_absolute_error,roc_auc_score, r2_score
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import zscore, pearsonr
class ModelBaseClass:
def __init__(self):
pass
def save(self):
pass
def load(self):
pass
class FireRiskModels():
def __init__(self,Modeltype ='propensity',ACS_year = '2016'):
self.SEED = 0
self.type = Modeltype
def train(self,NFIRS,ACS,ACS_variables =None, test_year='Current',n_years = 5):
# Create framework to predict whether a given census block has a fire risk score in the 90th percentile
# based on the specific number of previous years' data
if not ACS_variables:
ACS_variables = ACS.data.columns
self.ACS_variables_used = ACS_variables
ACS_data = ACS.data[ACS_variables]
ACS_data = ACS_data
fires = NFIRS.fires
top10 = NFIRS.top10
years = top10.columns
Model_Input = None
Model_Predictions = None
Model_Prediction_Probs = None
Model_Actual = None
# get year index
if test_year == 'Current':
test_year = fires.columns[-1]
if test_year in fires.columns:
test_year_idx = fires.columns.get_loc(test_year)
else:
raise ValueError(f"{test_year} is not in NFIRS Data."
f" The most recent year in NFIRS is {fires.columns[-1]}")
# each model will train on `n_years` of data to predict the locations subsequent year with highest fire risk
# model will train predicting 1 year and then test model accuracy by predicting the next year
# for example:
# Train
# predict 2016 using 2015-2012 data
#
#
# Test
# predict 2017 using 2016-2015 data
X_train, y_train,input1, Xtrain_years = self.munge_dataset(top10,fires,ACS_data,ACS.tot_pop, n_years,test_year_idx-1)
self.X_train = X_train
X_test, y_test,Input, Xtest_years = self.munge_dataset(top10,fires,ACS_data,ACS.tot_pop, n_years,test_year_idx)
#X_test, y_test,Input, self.train_years = self.munge_dataset_test(top10,fires,ACS_data,ACS.tot_pop, n_years,test_year_idx)
model_years = np.append(Xtrain_years, fires.columns[test_year_idx-1])
inference_years = np.append(Xtest_years, str(test_year))
self.years_used = np.union1d(model_years, inference_years)
# Note: `Input` is used for manual data validation to ensure munging performed correctly
# Create 80/20 training/testing set split
#X_train, X_test, y_train, y_test = train_test_split(X, y,test_size = .2 )
# Perform resampling if data classes are unbalanced
X_train, y_train = self.resample_df(X_train,y_train,upsample = False)
# Standardize features by removing the mean and scaling to unit variance
# scaler = preprocessing.StandardScaler().fit(X_train)
# X_train= scaler.transform(X_train)
# X_test = scaler.transform(X_test)
# Fit model to training set
print('Predicting {}:'.format(str(test_year)) )
model = XGBClassifier( n_estimators=60,
max_depth=10,
random_state=0,
max_features = None,
n_jobs = -1,
seed = self.SEED )
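        # note: max_features is not an XGBoost hyperparameter (it is a scikit-learn ensemble option);
        # with max_features=None it has no effect here.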
model = model.fit(X_train,y_train)
# Calculate training set performance
#train_prediction_probs = model.predict_proba(X_train)
#train_predictions = model.predict(X_train)
#print (confusion_matrix(y_train, train_predictions))
#print (roc_auc_score(y_train, train_prediction_probs[:,1]))
# Calculate test set performance
#print(X_test.columns)
#print(X_train.columns)
self.test_prediction_probs = model.predict_proba(X_test)
self.test_predictions = model.predict(X_test)
#Model_Predictions = pd.Series(test_predictions)
#Model_Prediction_Probs = pd.Series(test_prediction_probs[:,[1]].flatten())
#print(np.count_nonzero(np.isnan(self.test_predictions)))
print (confusion_matrix(y_test, self.test_predictions))
print (roc_auc_score(y_test, self.test_prediction_probs[:,1]))
print (classification_report(y_test,self.test_predictions))
#print (log_loss(y_test,self.test_predictions))
#Calculate feature importance for each model
importances = model.feature_importances_
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
for f in range(len(X_test.columns)):
print("%d. %s (%f)" % (f + 1, X_test.columns[indices[f]], importances[indices[f]]))
self.model = model
self.Input = Input
def predict(self,NFIRS,ACS=[],predict_year='Current'):
pass
@staticmethod
def munge_dataset(top10,fires,ACS,tot_pop, n_years,test_year_idx):
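        # Build per-block features from the n_years of fire counts preceding test_year_idx
        # (median and max per census block) plus the ACS covariates, and the binary target:
        # whether the block falls in the top decile of fire risk in the test year.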
years = top10.columns
test_loc = test_year_idx
# convert format for consistent output
X = fires.iloc[:,test_loc-n_years:test_loc].copy()
x_cols = X.columns
#X.columns = ['year-{}'.format(n_years-1 - year) for year in range(n_years-1)]
#sm = np.nansum(X, axis = 1 )
#mu = np.nanmean(X, axis = 1)
mx = np.nanmax(X, axis =1)
md = np.nanmedian(X,axis =1 )
X['Median'] = md
#X['Sum'] = sm
#X['Mean'] = mu
X['Max'] = mx
y = top10.iloc[:,test_loc]
#merge to get correct list of geoids then replace NaN with False
y = tot_pop.merge(y, how = 'left', left_index = True, right_index = True)
y = y.drop(columns = ['tot_population']).fillna(False)
y = y.squeeze()
# merge in ACS Data into X unless NFIRS-Only model
out_fires = []
if not ACS.empty:
# save copy for manual validation
out_fires = X.copy().merge(ACS, how ='right',left_index = True, right_index = True)
#out_fires = X.copy().merge(ACS, how ='left',left_index = True, right_index = True)
X=X[['Max','Median']] # drop all other NFIRS columns that have low feature importance scores
X = X.merge(ACS, how ='right',left_index = True, right_index = True)
#X = X.merge(ACS, how ='left',left_index = True, right_index = True)
return X,y,out_fires, x_cols
@staticmethod
def munge_dataset_test(top10,fires,ACS,tot_pop, n_years,test_year_idx):
years = top10.columns
test_loc = test_year_idx
# convert format for consistent output
X = fires.iloc[:,test_loc-n_years:test_loc].copy()
x_cols = X.columns
#X.columns = ['year-{}'.format(n_years-1 - year) for year in range(n_years-1)]
#sm = np.nansum(X, axis = 1 )
#mu = np.nanmean(X, axis = 1)
mx = np.nanmax(X, axis =1)
md = np.nanmedian(X,axis =1 )
X['Median'] = md
#X['Sum'] = sm
#X['Mean'] = mu
X['Max'] = mx
y = top10.iloc[:,test_loc]
#merge to get correct list of geoids then replace NaN with False
y = tot_pop.merge(y, how = 'left', left_index = True, right_index = True)
y = y.drop(columns = ['tot_population']).fillna(False)
y = y.squeeze()
# merge in ACS Data into X unless NFIRS-Only model
out_fires = []
if not ACS.empty:
# save copy for manual validation
#merge to get correct list of geoids, then replace NaN with 0
out_fires = X.copy().merge(ACS, how ='right',left_index = True, right_index = True)
X=X[['Max','Median']] # drop all other NFIRS columns that have low feature importance scores
#X = X.fillna(0)
X = X.merge(ACS, how ='right',left_index = True, right_index = True)
return X,y,out_fires, x_cols
@staticmethod
def resample_df(X,y,upsample=True,seed = 0):
from sklearn.utils import resample
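        # Balance the two classes: either upsample the minority class with replacement or
        # downsample the majority class, so both classes end up with equal row counts.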
# check which of our two classes is overly represented
if np.mean(y) > .5:
major,minor = 1,0
else:
major,minor = 0, 1
# Add Class feature to dataframe equal to our existing dependent variable
X['Class'] = y
df_major = X[X.Class == major ]
df_minor = X[X.Class == minor ]
if upsample:
df_minor_resampled = resample(df_minor,
replace = True,
n_samples = df_major.shape[0],
random_state = seed)
combined = pd.concat([df_major,df_minor_resampled])
# Debug
#print('minor class {}, major class {}'.format(df_minor_resampled.shape[0],
#df_major.shape[0]))
else: # downsample
df_major_resampled = resample(df_major,
replace = False,
n_samples = df_minor.shape[0],
random_state = seed)
combined = pd.concat([df_major_resampled,df_minor])
#print('minor class {}, major class {}'.format(df_minor.shape[0],
#df_major_resampled.shape[0]))
y_out = combined['Class']
X_out = combined.drop('Class', axis =1)
return X_out , y_out
class SmokeAlarmModels:
def __init__(self):
self.models = {}
def trainModels(self,ARC,ACS,SVI, ACS_variables,svi_use, data_path):
if not ACS_variables:
ACS_variables = ACS.data.columns
self.ACS_variables_used = ACS_variables
#ACS = ACS.data[ACS_variables]
self.arc = ARC.data
self.acs = ACS.data[ACS_variables]
self.acs_pop = ACS.tot_pop
self.svi = SVI.data
self.svi_use = svi_use
self.trainStatisticalModel()
return self.trainDLModel(data_path)
def trainStatisticalModel(self):
# single level models
for geo in ['State','County','Tract','Block_Group'] :
df = self.createSingleLevelSmokeAlarmModel(geo)
name = 'SmokeAlarmModel' + geo + '.csv'
df.index.name = 'geoid'
self.models[geo] = df
df.index = '#_' + df.index
out_path = utils.DATA['model-outputs'] /'Smoke_Alarm_Single_Level' / name
df.to_csv(out_path)
self.createMultiLevelSmokeAlarmModel()
def trainDLModel(self, data_path):
sm = self.models['MultiLevel'].copy()
if self.svi_use:
df = self.svi.copy()
else:
df = self.acs.copy()
sm = sm.reset_index()
sm['geoid'] = sm['geoid'].str[2:]
sm['tract'] = sm['geoid'].str[:-1]
sm.set_index('geoid', inplace = True)
sm_all = sm.copy()
sm = sm[ sm['geography'].isin(['tract','block_group']) ].copy()
rd = self.create_rurality_data(sm,data_path, True)
rd_all = self.create_rurality_data(sm_all, data_path)
if self.svi_use:
rd = rd['Population Density (per square mile), 2010'].to_frame()
rd_all = rd_all['Population Density (per square mile), 2010'].to_frame()
acs_pop = self.acs_pop#[self.acs_pop['tot_population'] >= 50 ]
rd = rd.filter(acs_pop.index, axis= 0)
mdl,X_test,y_test = self.trainXGB(X = rd, df = df, y = sm, predict = 'Presence', modeltype= 'XGBoost')
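# score every geography with the fitted model and clamp predictions to the valid 0-100 percentage range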
predictions = mdl.predict(rd_all.merge(df,how = 'left', left_index = True, right_index = True) )
sm_all['Predictions'] =np.clip(predictions,0,100)
sm_all.loc[:,['num_surveys','geography',
'detectors_found_prc',
'detectors_working_prc',
'Predictions'] ]
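# NOTE: the .loc selection above is never assigned, so it has no effect;
# assign it back to sm_all if the intent is to keep only those columns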
sm_all = sm_all.merge(rd_all['Population Density (per square mile), 2010'],how = 'left',left_index = True,right_index = True)
return sm_all
def trainXGB(self, X, df, y, predict, modeltype):
assert(predict in ['Presence', 'Working'])
model = xgb.XGBRegressor(objective = 'reg:squarederror',random_state = 0)
if predict == 'Presence':
y = y['detectors_found_prc']
elif predict =='Working':
y = y['detectors_working_prc']
# merge the demographic features (ACS or SVI) into X when provided
if not df.empty:
X = X.merge(df, how ='left',left_index = True, right_index = True)
y = y.filter(X.index)
# Create 80/20 training/testing set split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size = .2, random_state = 0 )
model = model.fit(X_train,y_train)
# Calculate training set performance
train_predictions = model.predict(X_train)
print('-----Training Performance------')
print ('Train MSE: {}'.format(mean_squared_error(y_train, train_predictions)) )
print ('Train RMSE: {}'.format(mean_squared_error(y_train, train_predictions, squared = False)) )
print ('Train MAE: {}'.format(mean_absolute_error(y_train, train_predictions)) )
sns.scatterplot(y_train,train_predictions)
plt.show()
# Calculate test set performance
test_predictions = model.predict(X_test)
print ('-----Test Performance ----- ')
print ('Test RMSE: {}'.format(mean_squared_error(y_test, test_predictions, squared = False)) )
print ('Test MAE: {}'.format(mean_absolute_error(y_test, test_predictions)) )
sns.scatterplot(y_test,test_predictions)
plt.show()
print ('Test Correlation: {}'.format(pearsonr(y_test, test_predictions)) )
print ('Test R-squared: {}'.format(r2_score(y_test, test_predictions)) )
#Calculate feature importance for each model
if modeltype == 'XGBoost':
importances = model.feature_importances_
indices = np.argsort(importances)[::-1]
print("\n Feature ranking:")
for f in range(len(X_test.columns)):
print("%d. %s (%f)" % (f + 1, X_test.columns[indices[f]], importances[indices[f]]))
return model,X_test,y_test
def create_rurality_data(self, sm, data_path, subset_county = False):
#Rurality Data Munging
rd = | pd.read_csv( data_path/'Master Project Data'/'Tract Rurality Data.csv', dtype = {'Tract':'object'},encoding = 'latin-1' ) | pandas.read_csv |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
def split_date(df):
# Remove the empty lines
df = df.dropna(how="all")
# Create a new dataframe for only the date and time
date = df.Päivämäärä.str.split(expand=True)
# Change column names
date.columns = ["Weekday", "Day", "Month", "Year", "Hour"]
# Create the conversion dictionaries
days = {"ma":"Mon", "ti":"Tue", "ke":"Wed", "to":"Thu", "pe":"Fri", "la":"Sat", "su":"Sun"}
months = {"tammi":1, "helmi":2, "maalis":3, "huhti":4, "touko":5, "kesä":6, "heinä":7, "elo":8, "syys":9, "loka":10, "marras":11, "joulu":12}
# Function do to time conversion to hours
def time_to_hour(time):
string = str(time)
hour_part = string.split(":")[0]
return int(hour_part)
# Convert columns
date.Weekday = date.Weekday.map(days)
date.Day = date.Day.map(int)
date.Month = date.Month.map(months)
date.Year = date.Year.map(int)
date.Hour = date.Hour.map(time_to_hour)
return date
def split_date_continues():
# Get the original dataframe
df = pd.read_csv("src/Helsingin_pyorailijamaarat.csv", sep=";")
# Remove empty rows and columns
df = df.dropna(how="all", axis=1).dropna(how="all")
# Get the dataframe which has the date split into multiple columns
date = split_date(df)
# Drop the Päivämäärä column
pruned = df.drop(columns=["Päivämäärä"])
return | pd.concat([date, pruned], axis=1) | pandas.concat |
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Make model predictions using this load.py script. This loads in all models in this
directory and makes predictions on a target folder. Note that files in this target
directory will be featurized with the default features as specified by the settings.json.
Usage: python3 load.py [target directory] [sampletype] [target model directory]
Example: python3 load.py /Users/jim/desktop/allie/load_dir audio /Users/jim/desktop/gender_tpot_classifier
Alt Usage: python3 load.py
--> this just loads all the models and makes predictions in the ./load_dir
'''
import os, json, pickle, time, sys, shutil
import pandas as pd
import numpy as np
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
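# e.g. prev_dir('/Users/jim/desktop/allie/load_dir') -> '/Users/jim/desktop/allie'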
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def model_schema():
models={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()
}
return models
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
filetypes=list(set(filetypes))
return filetypes
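# e.g. ['a.wav', 'b.txt'] -> ['audio', 'text'] (order not guaranteed because of set())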
def get_features(models, actual_model_dir, sampletype):
models=models['%s_models'%(sampletype)]
features=list()
for i in range(len(models)):
os.chdir(actual_model_dir+'/'+models[i])
temp_settings=json.load(open('settings.json'))
features=features+temp_settings['default_%s_features'%(sampletype)]
# get only the necessary features for all models
default_features=list(set(features))
return default_features
def featurize(features_dir, load_dir, model_dir, filetypes, models):
# contextually load the proper features based on the model information
actual_model_dir=prev_dir(features_dir)+'/models/'+model_dir
# get default features
sampletype=model_dir.split('_')[0]
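# e.g. 'audio_models' -> 'audio'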
default_features=get_features(models, actual_model_dir, sampletype)
# now change to proper directory for featurization
if model_dir=='audio_models' and 'audio' in filetypes:
os.chdir(features_dir+'/audio_features')
elif model_dir=='text_models' and 'text' in filetypes:
models=models['text_models']
os.chdir(features_dir+'/text_features')
elif model_dir=='image_models' and 'image' in filetypes:
models=models['image_models']
os.chdir(features_dir+'/image_features')
elif model_dir=='video_models' and 'video' in filetypes:
models=models['video_models']
os.chdir(features_dir+'/video_features')
elif model_dir=='csv_models' and 'csv' in filetypes:
models=models['csv_models']
os.chdir(features_dir+'/csv_features')
# call featurization API via default features
for i in range(len(default_features)):
print(os.getcwd())
os.system('python3 featurize.py %s %s'%(load_dir, default_features[i]))
def find_files(model_dir):
print(model_dir)
jsonfiles=list()
csvfiles=list()
if model_dir == 'audio_models':
listdir=os.listdir()
print(listdir)
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.wav') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'text_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.txt') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'image_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.png') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'video_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.mp4') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir =='csv_models':
# csv files are a little different here
listdir=os.listdir()
for i in range(len(listdir)):
csvfile='featurized_'+listdir[i]
if listdir[i].endswith('.csv') and csvfile in listdir:
csvfiles.append(csvfile)
else:
jsonfiles=[]
print(jsonfiles)
return jsonfiles, csvfiles
def make_predictions(sampletype, transformer, clf, modeltype, jsonfiles, csvfiles, default_features, classes, modeldata, model_dir):
'''
make predictions for each featurized sample with the loaded model and
write the predicted class back into the sample's .JSON file
(featurized .CSV files are handled separately for csv models).
'''
sampletype=sampletype.split('_')[0]
if sampletype != 'csv':
for k in range(len(jsonfiles)):
try:
g=json.load(open(jsonfiles[k]))
print(sampletype)
print(g)
features=list()
print(default_features)
for j in range(len(default_features)):
print(sampletype)
features=features+g['features'][sampletype][default_features[j]]['features']
labels=g['features'][sampletype][default_features[0]]['labels']
print(transformer)
print(features)
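# if a feature transformer (e.g. a fitted scaler) was loaded alongside the model,
# apply it to the single-sample feature vector before predicting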
if transformer != '':
features=np.array(transformer.transform(np.array(features).reshape(1, -1))).reshape(1, -1)
else:
features=np.array(features).reshape(1,-1)
print(features)
metrics_=dict()
print(modeltype)
if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(features)
elif modeltype=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif modeltype == 'autogluon':
curdir=os.getcwd()
os.chdir(model_dir+'/model/')
from autogluon import TabularPrediction as task
print(os.getcwd())
if transformer != '':
new_features=dict()
for i in range(len(features[0])):
new_features['feature_%s'%(str(i))]=[features[0][i]]
print(new_features)
df=pd.DataFrame(new_features)
else:
df=pd.DataFrame(features, columns=labels)
y_pred=clf.predict(df)
os.chdir(curdir)
elif modeltype == 'autokeras':
curdir=os.getcwd()
os.chdir(model_dir+'/model')
print(os.getcwd())
y_pred=clf.predict(features).flatten()
os.chdir(curdir)
elif modeltype == 'autopytorch':
y_pred=clf.predict(features).flatten()
elif modeltype == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif modeltype == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif modeltype== 'devol':
features=features.reshape(features.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(features).flatten()
elif modeltype=='keras':
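# NOTE: 'mtype' ('c' for classification, 'r' for regression) is assumed to be defined
# in the enclosing scope; it is not a parameter of this function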
if mtype == 'c':
y_pred=clf.predict_classes(features).flatten()
elif mtype == 'r':
y_pred=clf.predict(features).flatten()
elif modeltype =='neuraxle':
y_pred=clf.transform(features)
elif modeltype=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
# update model in schema
# except:
# print('error %s'%(modeltype.upper()))
# try:
# get class from classes (assuming classification)
'''
X={'male': [1],
'female': [2],
'other': [3]}
then do a search of the values
names=list(X) --> ['male', 'female', 'other']
i1=list(X.values()).index([1]) --> 0
names[i1] --> male
'''
# print(modeldata)
outputs=dict()
for i in range(len(classes)):
outputs[classes[i]]=[i]
names=list(outputs)
i1=list(outputs.values()).index(y_pred)
class_=classes[i1]
print(y_pred)
print(outputs)
print(i1)
print(class_)
try:
models=g['models']
except:
models=model_schema()
temp=models[sampletype]
if class_ not in list(temp):
temp[class_]= [modeldata]
else:
tclass=temp[class_]
try:
# make a list if it is not already to be compatible with deprecated versions
tclass.append(modeldata)
except:
tclass=[tclass]
tclass.append(modeldata)
temp[class_]=tclass
models[sampletype]=temp
g['models']=models
print(class_)
# update database
jsonfilename=open(jsonfiles[k],'w')
json.dump(g,jsonfilename)
jsonfilename.close()
except:
print('error making jsonfile %s'%(jsonfiles[k].upper()))
else:
try:
for k in range(len(csvfiles)):
if len(csvfiles[k].split('featurized')) == 2:
features=pd.read_csv(csvfiles[k])
oldfeatures=features
print(features)
if transformer != '':
features=np.array(transformer.transform(np.array(features)))
else:
features=np.array(features)
print(features)
metrics_=dict()
print(modeltype)
if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(features)
elif modeltype=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif modeltype == 'autogluon':
curdir=os.getcwd()
os.chdir(model_dir+'/model/')
from autogluon import TabularPrediction as task
print(os.getcwd())
if transformer != '':
new_features=dict()
for i in range(len(features[0])):
new_features['feature_%s'%(str(i))]=[features[0][i]]
print(new_features)
df=pd.DataFrame(new_features)
else:
df=pd.DataFrame(features, columns=labels)
y_pred=clf.predict(df)
os.chdir(curdir)
elif modeltype == 'autokeras':
curdir=os.getcwd()
os.chdir(model_dir+'/model')
print(os.getcwd())
y_pred=clf.predict(features).flatten()
os.chdir(curdir)
elif modeltype == 'autopytorch':
y_pred=clf.predict(features).flatten()
elif modeltype == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = | pd.read_csv('test.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 17:19:41 2020
@author: <NAME>
"""
import pandas as pd
def int_br(x):
return int(x.replace('.',''))
def float_br(x):
return float(x.replace('.', '').replace(',','.'))
dia = '2805'
file_HU = '~/ownCloud/sesab/exporta_boletim_epidemiologico_csv_{}.csv'.format(dia)
datahu = pd.read_csv(file_HU, sep=';', decimal=',', converters={'CASOS CONFIRMADOS': int_br})
rday = 'DATA DO BOLETIM'
datahu[rday] = pd.to_datetime(datahu[rday], dayfirst=True)
datahu['DayNum'] = datahu[rday].dt.dayofyear
ref = pd.Timestamp(year=2020, month=2, day=27).dayofyear
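# ts0 = number of days elapsed since the 2020-02-27 reference date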
datahu['ts0'] = datahu['DayNum'] - ref
colsutils = ['DATA DO BOLETIM', 'ts0', 'CASOS CONFIRMADOS', 'CASOS ENFERMARIA',
'CASOS UTI','TOTAL OBITOS']
dfi = datahu[colsutils]
dff = pd.DataFrame(columns=colsutils)
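# for each day offset keep only the row from the most recent bulletin (latest 'DATA DO BOLETIM')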
for i, day in enumerate(dfi['ts0'].unique()):
line = dfi[dfi['ts0'] == day]
line = line.sort_values(by=['DATA DO BOLETIM'], ascending=False)
line.reset_index(drop=True, inplace=True)
dff.loc[i] = line.loc[0]
cols = ['dates', 'ts0', 'infec', 'leitos', 'uti', 'dthcm']
dff.columns = cols
df0 = pd.read_csv('data_0.csv')
df0['dates'] = pd.to_datetime(df0['dates'], dayfirst=True)
dfnew = | pd.concat([df0, dff], sort=False) | pandas.concat |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from shrike import compliant_logging
from shrike.compliant_logging.constants import DataCategory
from shrike.compliant_logging.logging import (
CompliantLogger,
get_aml_context,
)
from shrike.compliant_logging.exceptions import PublicRuntimeError
from shrike.compliant_logging import is_eyesoff
from pathlib import Path
import logging
import pytest
import re
import sys
import vaex
import pandas as pd
import numpy as np
from pyspark.sql import SparkSession
from pyspark.sql.types import (
StructType,
StructField,
FloatType,
IntegerType,
StringType,
)
from unittest.mock import patch
from shrike._core import stream_handler
def test_basic_config():
logging.warning("before basic config")
logging.basicConfig()
logging.warning("warning from test_basic_config")
log = logging.getLogger("foo")
log.warning("warning from foo logger")
@pytest.mark.parametrize("level", ["debug", "info", "warning", "error", "critical"])
def test_data_category_and_log_info_works_as_expected(level):
compliant_logging.enable_compliant_logging()
log = logging.getLogger()
log.setLevel(level.upper())
assert isinstance(log, compliant_logging.logging.CompliantLogger)
with stream_handler(
log, format="%(prefix)s%(levelname)s:%(name)s:%(message)s"
) as context:
func = getattr(log, level)
func("PRIVATE")
func("public", category=DataCategory.PUBLIC)
logs = str(context)
assert re.search(r"^SystemLog\:.*public$", logs, flags=re.MULTILINE)
assert not re.search(r"^SystemLog\:.*\:PRIVATE", logs, flags=re.MULTILINE)
def test_non_category_aware_logging_works_as_expected():
compliant_logging.enable_compliant_logging()
log = logging.getLogger()
extra = {"test_name": "", "test_id": ""}
assert isinstance(log, compliant_logging.logging.CompliantLogger)
with stream_handler(
log, "%(test_name)s:%(test_id)s %(prefix)s%(levelname)s:%(name)s:%(message)s"
) as context:
log.log(1, "message", extra={"test_name": "Test", "test_id": 1})
log.debug("message", extra={"test_name": "Test2", "test_id": 0})
log.info("message", extra=extra)
log.warning("message", extra={"test_name": "My", "test_id": "a"})
try:
1 / 0
except Exception as e:
logging.error(
"Error at division",
exc_info=e,
stack_info=True,
extra={"test_name": "Test", "test_id": 1},
)
log.critical("message", extra=extra, stack_info=True)
logs = str(context)
assert re.search(r"^Test:1 Level 1:root:message$", logs, flags=re.MULTILINE)
assert re.search(r"^Test2:0 DEBUG:root:message$", logs, flags=re.MULTILINE)
assert re.search(r"^: INFO:root:message$", logs, flags=re.MULTILINE)
assert re.search(r"^My:a WARNING:root:message$", logs, flags=re.MULTILINE)
assert re.search(
r"^Test:1 ERROR:root:Error at division\nTraceback(.*\n){4}Stack",
logs,
flags=re.MULTILINE,
)
assert re.search(r"^: CRITICAL:root:message\nStack", logs, flags=re.MULTILINE)
@pytest.mark.parametrize("exec_type,message", [(ArithmeticError, "1+1 != 3")])
def test_exception_works_as_expected(exec_type, message):
compliant_logging.enable_compliant_logging()
log = logging.getLogger()
assert isinstance(log, compliant_logging.logging.CompliantLogger)
with stream_handler(log, "%(prefix)s%(levelname)s:%(name)s:%(message)s") as context:
try:
raise exec_type(message)
except exec_type:
log.error("foo", category=DataCategory.PUBLIC)
logs = str(context)
assert re.search(r"^SystemLog\:.*foo$", logs, flags=re.MULTILINE)
def test_all_the_stuff():
compliant_logging.enable_compliant_logging()
log = logging.getLogger("foo")
log.info("public", category=DataCategory.PUBLIC)
log.info("PRIVATE", category=DataCategory.PRIVATE)
log.info("PRIVATE2")
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Requires Python >= 3.8")
def test_enable_compliant_logging_sets_force():
# Pytest adds handlers to the root logger by default.
initial_handlers = list(logging.root.handlers)
compliant_logging.enable_compliant_logging()
assert len(logging.root.handlers) == 1
assert all(h not in logging.root.handlers for h in initial_handlers)
def test_warn_if_root_handlers_already_exist(capsys):
# Pytest adds handlers to the root logger by default.
compliant_logging.enable_compliant_logging()
# https://docs.pytest.org/en/stable/capture.html
stderr = capsys.readouterr().err
assert "SystemLog:The root logger already has handlers set!" in stderr
def test_deprecated_enable_confidential_logging(capsys):
"""Pytest the pending deprecation of enable_confidential_logging"""
compliant_logging.enable_confidential_logging()
# https://docs.pytest.org/en/stable/capture.html
stderr = capsys.readouterr().err
assert (
"SystemLog: The function enable_confidential_logging() is on the way "
"to deprecation. Please use enable_compliant_logging() instead." in stderr
)
def test_get_aml_context():
"""Pytest CompliantLogger._get_aml_context"""
assert (
compliant_logging.logging.CompliantLogger(
name="test_get_aml_context"
)._get_aml_context()
is get_aml_context()
)
def test_logging_aml_metric_single_value():
"""Pytest CompliantLogger.metric_value"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
log.metric_value(name="test_log_value", value=0, category=DataCategory.PUBLIC)
with stream_handler(log, "") as context:
log.metric_value(name="test_log_value", value=0, category=DataCategory.PRIVATE)
logs = str(context)
assert "NumbericMetric | test_log_value:None | 0" in logs
def test_logging_aml_metric_image():
"""Pytest CompliantLogger.metric_image"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
log.metric_image(
name="dummy_image",
path=str(Path(__file__).parent.resolve() / "data/dummy_image.png"),
category=DataCategory.PUBLIC,
)
log.metric_image(
path=str(Path(__file__).parent.resolve() / "data/dummy_image.png"),
category=DataCategory.PUBLIC,
)
with stream_handler(log, "") as context:
log.metric_image(
name="test_log_image",
path=str(Path(__file__).parent.resolve() / "data/dummy_image.png"),
category=DataCategory.PRIVATE,
)
logs = str(context)
assert "Unable to log image metric test_log_image as private, skipping." in logs
def test_logging_aml_metric_list_tuple():
"""Pytest CompliantLogger.metric_list"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
log.metric_list(
name="test_log_list", value=[1, 2, 3, 4], category=DataCategory.PUBLIC
)
log.metric_list(
name="test_log_tuple", value=("1", "2", "test"), category=DataCategory.PUBLIC
)
with stream_handler(log, "") as context:
log.metric_list(
name="test_log_empty_list", value=[], category=DataCategory.PUBLIC
)
log.metric_list(name="test_log_tupe_private", value=("1", "2", "test", None))
logs = str(context)
assert "List Value for Metric test_log_empty_list is empty. Skipping." in logs
assert "ListMetric | test_log_tupe_private | ['1', '2', 'test', None]" in logs
def test_logging_aml_metric_row():
"""Pytest COmpliantLogger.metric_row"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
log.metric_row(
name="test_row",
description="stats",
category=DataCategory.PUBLIC,
total_size=100,
file_count=200,
)
with stream_handler(log, "") as context:
log.metric_row(
name="test_row",
description="stats",
category=DataCategory.PRIVATE,
total_size=100,
file_count=200,
)
logs = str(context)
assert "RowMetric | test_row | total_size:100 | file_count:200" in logs
def test_logging_aml_metric_table():
"""Pytest CompliantLogger.metric_table"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
test_table1 = {"name": ["James", "Robert", "Michael"], "number": [2, 3, 1, 5]}
test_table2 = {"name": ["James", "Robert", "Michael"], "number": 2}
test_table3 = {"name": 2, "number": 4}
test_table4 = {"name": ["James", "Robert", "Michael"], "number": [2, 3, None]}
log.metric_table(
name="test_table1", value=test_table1, category=DataCategory.PUBLIC
)
with stream_handler(log, "") as context:
log.metric_table(name="test_table1", value=test_table1)
logs = str(context)
assert "TableMetric | test_table" in logs
assert "TableMetric | Index | name | number" in logs
assert "TableMetric | 00000 | James | 2 " in logs
assert "TableMetric | 00001 | Robert | 3 " in logs
assert "TableMetric | 00002 | Michael | 1 " in logs
assert "TableMetric | 00003 | | 5 " in logs
# Checking empty value
with stream_handler(log, "") as context:
log.metric_table(name="empty_input", value={}, category=DataCategory.PUBLIC)
logs = str(context)
assert "Dictionary Value for Metric empty_input is empty. Skipping." in logs
# Checking mixed types
with stream_handler(log, "") as context:
log.metric_table(
name="mixed_type", value=test_table2, category=DataCategory.PUBLIC
)
logs = str(context)
assert (
"The provided dictionary for metric mixed_type appears to be unstructured!"
in logs
)
log.metric_table(
name="test_table3", value=test_table3, category=DataCategory.PUBLIC
)
with stream_handler(log, "") as context:
log.metric_table(name="test_table4", value=test_table4)
logs = str(context)
assert "TableMetric | test_table" in logs
assert "TableMetric | Index | name | number" in logs
assert "TableMetric | 00000 | James | 2 " in logs
assert "TableMetric | 00001 | Robert | 3 " in logs
assert "TableMetric | 00002 | Michael | " in logs
def test_logging_aml_metric_residual():
"""Pytest CompliantLogger.metric_residual"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
# Testing vaex dataframe
test_input1 = vaex.from_arrays(
x=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], y=[1.1, 2.0, 3.2, 3.8, 5, 6, 7, 8, 9, 10]
)
log.metric_residual(
name="test_log_residual",
value=test_input1,
col_predict="x",
col_target="y",
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_residual(
name="test_log_residual",
value=test_input1,
category=DataCategory.PUBLIC,
)
with stream_handler(log, "") as context:
log.metric_residual(
name="test_log_residual",
value=test_input1,
col_predict="x",
col_target="y",
)
logs = str(context)
assert "Logging Residuals to text is not yet implemented" in logs
# Testing spark dataframe
test_input2 = (
SparkSession.builder.appName("SparkUnitTests")
.getOrCreate()
.createDataFrame(
data=[(1.0, 1.1), (2.0, 2.0), (3.0, 3.1), (4.0, 3.8), (5.0, 5.2)],
schema=StructType(
[
StructField("prediction", FloatType(), True),
StructField("target", FloatType(), True),
]
),
)
)
log.metric_residual(
name="test_log_residual",
value=test_input2,
col_predict="prediction",
col_target="target",
category=DataCategory.PUBLIC,
)
# Testing pandas dataframe
test_input3 = pd.DataFrame(
[[1.0, 1.1], [2.0, 2.0], [3.0, 3.1], [4.0, 3.8], [100.0, 5.2]],
columns=["prediction", "target"],
)
log.metric_residual(
name="test_log_residual",
value=test_input3,
col_predict="prediction",
col_target="target",
category=DataCategory.PUBLIC,
)
# Testing un-supported data type
test_input4 = [1, 2, 3, 4]
with pytest.raises(PublicRuntimeError):
log.metric_residual(
name="test_log_residual",
value=test_input4,
col_predict="x",
col_target="y",
category=DataCategory.PUBLIC,
)
# Testing dict
test_input5 = {
"target": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"prediction": [1.1, 2.0, 3.2, 3.8, 5, 6, 7, 8, 9, 10],
}
test_input6 = {
"schema_type": "residuals",
"schema_version": "1.0.0",
"data": {
"bin_edges": [0.0, 0.25, 0.5, 0.75, 1.0],
"bin_counts": [0.0, 0.0, 0.0, 0.1],
},
}
test_input7 = {
"target": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"prediction": [1.1],
}
log.metric_residual(
name="test_log_residual",
value=test_input5,
col_predict="prediction",
col_target="target",
bin_edges=3,
category=DataCategory.PUBLIC,
)
log.metric_residual(
name="test_log_residual",
value=test_input6,
col_predict="prediction",
col_target="target",
bin_edges=4,
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_residual(
name="test_log_residual",
value=test_input7,
col_predict="prediction",
col_target="target",
category=DataCategory.PUBLIC,
)
def test_logging_aml_metric_predictions():
"""Pytest CompliantLogger.metric_predictions"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
# Testing vaex dataframe
test_input1 = vaex.from_arrays(
x=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], y=[1.1, 2.0, 3.2, 3.8, 5, 6, 7, 8, 9, 10]
)
log.metric_predictions(
name="test_log_predictions",
value=test_input1,
col_predict="x",
col_target="y",
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_predictions(
name="test_log_predictions",
value=test_input1,
category=DataCategory.PUBLIC,
)
with stream_handler(log, "") as context:
log.metric_predictions(
name="test_log_predictions",
value=test_input1,
col_predict="x",
col_target="y",
)
logs = str(context)
assert "Logging Predictions to text is not yet implemented" in logs
# Testing spark dataframe
test_input2 = (
SparkSession.builder.appName("SparkUnitTests")
.getOrCreate()
.createDataFrame(
data=[(1.0, 1.1), (2.0, 2.0), (3.0, 3.1), (4.0, 3.8), (5.0, 5.2)],
schema=StructType(
[
StructField("prediction", FloatType(), True),
StructField("target", FloatType(), True),
]
),
)
)
log.metric_predictions(
name="test_log_predictions",
value=test_input2,
col_predict="prediction",
col_target="target",
category=DataCategory.PUBLIC,
)
# Testing pandas dataframe
test_input3 = pd.DataFrame(
[[1.0, 1.1], [2.0, 2.0], [3.0, 3.1], [4.0, 3.8], [100.0, 5.2]],
columns=["prediction", "target"],
)
log.metric_predictions(
name="test_log_predictions",
value=test_input3,
col_predict="prediction",
col_target="target",
category=DataCategory.PUBLIC,
)
# Testing un-supported data type
test_input4 = [1, 2, 3, 4]
with pytest.raises(PublicRuntimeError):
log.metric_predictions(
name="test_log_predictions",
value=test_input4,
col_predict="x",
col_target="y",
category=DataCategory.PUBLIC,
)
# Testing dict
test_input5 = {
"target": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"prediction": [1.1, 2.0, 3.2, 3.8, 5, 6, 7, 8, 9, 10],
}
test_input6 = {
"schema_type": "predictions",
"schema_version": "1.0.0",
"data": {
"bin_averages": [1, 2, 3, 4],
"bin_errors": [0.0, 0.0, 0.0, 0.0],
"bin_counts": [0, 0, 0, 0],
"bin_edges": [0.0, 0.25, 0.5, 0.75, 1.0],
},
}
test_input7 = {
"target": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"prediction": [1.1],
}
log.metric_predictions(
name="test_log_predictions",
value=test_input5,
col_predict="prediction",
col_target="target",
bin_edges=3,
category=DataCategory.PUBLIC,
)
log.metric_predictions(
name="test_log_predictions",
value=test_input6,
col_predict="prediction",
col_target="target",
bin_edges=4,
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_predictions(
name="test_log_predictions",
value=test_input7,
col_predict="prediction",
col_target="target",
category=DataCategory.PUBLIC,
)
def test_logging_aml_metric_confusion_matrix():
"""Pytest CompliantLogger.metric_confusion_matrix"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
# Testing vaex dataframe
test_input1 = vaex.from_arrays(
x=["cat", "ant", "cat", "cat", "ant", "bird"],
y=["ant", "ant", "cat", "cat", "ant", "cat"],
)
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input1,
idx_true="x",
idx_pred="y",
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input1,
category=DataCategory.PUBLIC,
)
with stream_handler(log, "") as context:
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input1,
idx_true="x",
idx_pred="y",
)
logs = str(context)
assert "Logging Confusion Matrices to text is not yet implemented" in logs
# Testing spark dataframe
test_input2 = (
SparkSession.builder.appName("SparkUnitTests")
.getOrCreate()
.createDataFrame(
data=[(1, 1), (2, 2), (1, 2), (2, 2), (1, 2)],
schema=StructType(
[
StructField("prediction", IntegerType(), True),
StructField("target", IntegerType(), True),
]
),
)
)
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input2,
idx_true="target",
idx_pred="prediction",
category=DataCategory.PUBLIC,
)
# Testing pandas dataframe
test_input3 = pd.DataFrame(
[[1, 1], [2, 2], [1, 2], [1, 2], [1, 1]],
columns=["prediction", "target"],
)
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input3,
idx_true="target",
idx_pred="prediction",
category=DataCategory.PUBLIC,
)
# Testing un-supported data type
test_input4 = [1, 2, 3, 4]
with pytest.raises(PublicRuntimeError):
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input4,
idx_true="target",
idx_pred="prediction",
category=DataCategory.PUBLIC,
)
# Testing dict
test_input5 = {
"target": ["cat", "ant", "cat", "cat", "ant", "bird"],
"prediction": ["ant", "ant", "cat", "cat", "ant", "cat"],
}
test_input6 = {
"schema_type": "confusion_matrix",
"schema_version": "1.0.0",
"data": {
"class_labels": [1, 2],
"matrix": [[2, 0], [2, 1]],
},
}
test_input7 = {
"target": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"prediction": [1.1],
}
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input5,
idx_true="target",
idx_pred="prediction",
category=DataCategory.PUBLIC,
)
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input6,
idx_true="target",
idx_pred="prediction",
labels=["cat", "dog"],
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_confusion_matrix(
name="test_log_confusion_matrix",
value=test_input7,
idx_true="target",
idx_pred="prediction",
category=DataCategory.PUBLIC,
)
def test_logging_aml_metric_accuracy_table():
"""Pytest CompliantLogger.metric_accuracy_table"""
compliant_logging.enable_compliant_logging(use_aml_metrics=True)
log = logging.getLogger()
# Testing vaex dataframe
test_input1 = vaex.from_arrays(x=[0.1, 0.3, 0.7], y=["a", "b", "c"])
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input1,
col_predict="x",
col_target="y",
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input1,
category=DataCategory.PUBLIC,
)
with stream_handler(log, "") as context:
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input1,
col_predict="x",
col_target="y",
)
logs = str(context)
assert "Logging Accuracy Tables to text is not yet implemented" in logs
# Testing spark dataframe
test_input2 = (
SparkSession.builder.appName("SparkUnitTests")
.getOrCreate()
.createDataFrame(
data=[(0.1, "a"), (0.3, "b"), (0.6, "c")],
schema=StructType(
[
StructField("probability", FloatType(), True),
StructField("labels", StringType(), True),
]
),
)
)
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input2,
col_predict="probability",
col_target="labels",
category=DataCategory.PUBLIC,
)
# Testing pandas dataframe
test_input3 = pd.DataFrame(
[[0.1, "a"], [0.3, "b"], [0.6, "c"]],
columns=["probability", "labels"],
)
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input3,
col_predict="probability",
col_target="labels",
category=DataCategory.PUBLIC,
)
# Testing un-supported data type
test_input4 = [1, 2, 3, 4]
with pytest.raises(PublicRuntimeError):
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input4,
col_predict="probability",
col_target="labels",
category=DataCategory.PUBLIC,
)
# Testing dict
test_input5 = {
"probability": [0.1, 0.3, 0.7],
"labels": ["a", "b", "c"],
}
test_input6 = {
"schema_type": "accuracy_table",
"schema_version": "1.0.1",
"data": {
"probability_tables": [
[[1, 2, 0, 0], [0, 2, 0, 1], [0, 1, 1, 1], [0, 0, 2, 1], [0, 0, 2, 1]],
[[1, 2, 0, 0], [1, 1, 1, 0], [0, 1, 1, 1], [0, 0, 2, 1], [0, 0, 2, 1]],
[[1, 2, 0, 0], [1, 1, 1, 0], [1, 0, 2, 0], [0, 0, 2, 1], [0, 0, 2, 1]],
],
"precentile_tables": [
[[1, 2, 0, 0], [0, 2, 0, 1], [0, 2, 0, 1], [0, 0, 2, 1], [0, 0, 2, 1]],
[[1, 1, 1, 0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 2, 1], [0, 0, 2, 1]],
[[1, 0, 2, 0], [0, 0, 2, 1], [0, 0, 2, 1], [0, 0, 2, 1], [0, 0, 2, 1]],
],
"probability_thresholds": [0.0, 0.25, 0.5, 0.75, 1.0],
"percentile_thresholds": [0.0, 0.01, 0.24, 0.98, 1.0],
"class_labels": ["a", "b", "c"],
},
}
test_input7 = {
"probability": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
"labels": [1.1],
}
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input5,
col_predict="probability",
col_target="labels",
category=DataCategory.PUBLIC,
)
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input6,
col_predict="probability",
col_target="labels",
category=DataCategory.PUBLIC,
)
with pytest.raises(PublicRuntimeError):
log.metric_accuracy_table(
name="test_log_accuracy_table",
value=test_input7,
col_predict="probability",
col_target="labels",
category=DataCategory.PUBLIC,
)
def test_convert_obj():
"""Pytest CompliantLogger._convert_obj"""
logger = CompliantLogger(name="")
assert logger._convert_obj("tests", category=DataCategory.PUBLIC) == "tests"
assert "Spark DataFrame (Row Count: 5 / Column Count: 2)" in logger._convert_obj(
(
SparkSession.builder.appName("SparkUnitTests")
.getOrCreate()
.createDataFrame(
data=[(1, 1), (2, 2), (1, 2), (2, 2), (1, 2)],
schema=StructType(
[
StructField("prediction", IntegerType(), True),
StructField("target", IntegerType(), True),
]
),
)
),
category=DataCategory.PUBLIC,
)
assert "Vaex DataFrame (Row Count: 5 / Column Count: 2)" in logger._convert_obj(
vaex.from_arrays(x=np.arange(5), y=np.arange(5) ** 2)
)
test_df = pd.DataFrame(
[["Tom", 10], ["Nick", 15], ["John", 14]], columns=["Name", "Age"]
)
assert "Pandas DataFrame (Row Count: 3 / Column Count: 2)" in logger._convert_obj(
test_df
)
test_series = | pd.Series([1, 2, 3, 4, 5]) | pandas.Series |
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# GH 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
vals = np.array(range(10), dtype=float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_rolling_quantile(self, q, raw):
def scoreatpercentile(a, per):
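# linear interpolation between the two nearest order statistics,
# mirroring numpy.percentile's default 'linear' interpolation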
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(
self, quantile, interpolation, data
):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param(self):
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile("foo")
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
def f(x):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name="apply", func=f, raw=raw)
def test_rolling_std(self, raw):
self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw)
self._check_moment_func(
lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw
)
def test_rolling_std_1obs(self):
vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self, raw):
self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw)
self._check_moment_func(
lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw
)
@td.skip_if_no_scipy
def test_rolling_skew(self, raw):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw)
@td.skip_if_no_scipy
def test_rolling_kurt(self, raw):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw)
def _check_moment_func(
self,
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
**kwargs,
):
# inject raw
if name == "apply":
kwargs = copy.copy(kwargs)
kwargs["raw"] = raw
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample("B").mean()
frame = self.frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
series_result = get_result(series, window=win, min_periods=0)
frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1, min_periods=minp)
expected = get_result(self.series, len(self.series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1, min_periods=0)
expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
print(result)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=minp, center=True
)
frame_rs = get_result(
self.frame, window=25, min_periods=minp, center=True
)
else:
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=0, center=True
)
frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
class TestRollingMomentsConsistency(Base):
def setup_method(self, method):
self._create_data()
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(self, func):
check_pairwise_moment(self.frame, "rolling", func, window=10, min_periods=5)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{
k: getattr(self.frame[k].rolling(window=10), method)(frame2[k])
for k in self.frame
}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name,
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
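# (Editorial note: a rolling correlation against a constant, zero-variance series is
#  undefined because the correlation denominator contains that series' standard
#  deviation, which is zero, so pandas returns NaN for every window.)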
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
_flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary():
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
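# (Editorial note: rolling skew/kurt are NaN whenever the window is constant, since the
#  sample standard deviation in the denominator is zero, and also when the window holds
#  fewer observations than the estimator needs; the cases above exercise both paths.)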
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr():
# #18804 all rolling kurt for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types():
# GH12373
types_test = [np.dtype(f"f{width}") for width in [4, 8]]
types_test.extend(
[np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"]
)
for data_type in types_test:
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
def test_moment_functions_zero_length():
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
functions = [
lambda x: x.rolling(window=10).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
]
for f in functions:
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
def test_moment_functions_zero_length_pairwise():
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns= | Index([]) | pandas.Index |
import os
import glob
import json
import argparse
import numpy as np
import pandas as pd
import joblib
from azureml.core.model import Model
from azureml.core import Run
current_run = None
model = None
def init():
print("Started batch scoring by running init()")
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, help='Model to use for batch scoring')
args, _ = parser.parse_known_args()
global current_run
current_run = Run.get_context()
print(f'Arguments: {args}')
print(f'Model name: {args.model_name}')
global model
model_path = Model.get_model_path(args.model_name)
model = joblib.load(model_path)
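# Editorial note (hedged): this init()/run(mini_batch) pair follows Azure ML's batch
# scoring (ParallelRunStep) contract, where init() runs once per worker process to load
# the registered model and run() is then called repeatedly with lists of input file paths,
# returning a DataFrame of scores per mini-batch.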
def run(file_list):
try:
output_df = pd.DataFrame(columns=["Sno", "ProbaGoodCredit", "ProbaBadCredit"])
for filename in file_list:
df = | pd.read_csv(filename) | pandas.read_csv |
# -*- encoding: utf-8 -*-
"""
===============================
Test and Train data with Pandas
===============================
*auto-sklearn* can automatically encode categorical columns using a label/ordinal encoder.
This example highlights how to properly set the dtype in a DataFrame for this to happen,
and showcases how to also pass testing data to auto-sklearn.
The X_train/y_train arguments to the fit function will be used to fit the scikit-learn model,
whereas the X_test/y_test will be used to evaluate how good this scikit-learn model generalizes
to unseen data (i.e. data not in X_train/y_train). Using test data is a good mechanism to measure
if the trained model suffers from overfitting, and more details can be found on `evaluating estimator
performance <https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation>`_.
This example further highlights, through a plot, the best individual models found by *auto-sklearn*
over time (under the single_best_optimization_score/single_best_test_score legend entries).
It also shows the training and test performance of the ensemble built using the best
performing models (under ensemble_optimization_score and ensemble_test_score respectively).
There is also support to manually indicate the feature types (whether a column is categorical
or numerical) via the feat_type argument of fit(). This is important when working with
lists or numpy arrays, as there is no per-column dtype (further details in the example
`Continuous and categorical data <example_feature_types.html>`_).
"""
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.model_selection
import sklearn.datasets
import sklearn.metrics
from smac.tae import StatusType
import autosklearn.classification
def get_runhistory_models_performance(automl):
    metric = automl.automl_._metric
data = automl.automl_.runhistory_.data
performance_list = []
for run_key, run_value in data.items():
if run_value.status != StatusType.SUCCESS:
# Ignore crashed runs
continue
# Alternatively, it is possible to also obtain the start time with ``run_value.starttime``
endtime = pd.Timestamp(time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(run_value.endtime)))
val_score = metric._optimum - (metric._sign * run_value.cost)
test_score = metric._optimum - (metric._sign * run_value.additional_info['test_loss'])
train_score = metric._optimum - (metric._sign * run_value.additional_info['train_loss'])
performance_list.append({
'Timestamp': endtime,
'single_best_optimization_score': val_score,
'single_best_test_score': test_score,
'single_best_train_score': train_score,
})
return pd.DataFrame(performance_list)
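# Usage sketch (illustrative, not part of the original example): after automl.fit(...),
#   individual_performances = get_runhistory_models_performance(automl)
#   individual_performances.plot(x='Timestamp',
#                                y=['single_best_optimization_score', 'single_best_test_score'])
# plots the per-run validation and test scores over time described in the docstring above.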
############################################################################
# Data Loading
# ============
# Using Australian dataset https://www.openml.org/d/40981.
# This example will use the command fetch_openml, which will
# download a properly formatted dataframe if you use as_frame=True.
# For demonstration purposes, we will download a numpy array using
# as_frame=False, and manually creating the pandas DataFrame
X, y = sklearn.datasets.fetch_openml(data_id=40981, return_X_y=True, as_frame=False)
# bool and category will be automatically encoded.
# Targets for classification are also automatically encoded
# If using fetch_openml, data is already properly encoded, below
# is an example for user reference
X = pd.DataFrame(
data=X,
columns=['A' + str(i) for i in range(1, 15)]
)
desired_boolean_columns = ['A1']
desired_categorical_columns = ['A4', 'A5', 'A6', 'A8', 'A9', 'A11', 'A12']
desired_numerical_columns = ['A2', 'A3', 'A7', 'A10', 'A13', 'A14']
for column in X.columns:
if column in desired_boolean_columns:
X[column] = X[column].astype('bool')
elif column in desired_categorical_columns:
X[column] = X[column].astype('category')
else:
X[column] = pd.to_numeric(X[column])
y = | pd.DataFrame(y, dtype='category') | pandas.DataFrame |
import argparse
import os
import sys
import time
from datetime import datetime, timedelta
from pathlib import Path
from pprint import pprint
import pandas as pd
import schedule
sys.path.insert(1, str(Path('src/marktech').resolve()))
import scrape_static
scraper = scrape_static.StaticPageScraper(verbose=0)
def get_stocks(filter=None):
default_html_locations = {
"current_values": [
"#quotes_summary_current_data",
],
"secondary_data": [
"#quotes_summary_secondary_data",
],
"hr1_tech_content": [
"#techinalContent",
],
}
stocks = {
#### NSE/BSE
'HLL': {
'url': 'https://www.investing.com/equities/hindustan-unilever-technical',
'html_locations': default_html_locations,
},
'RELI': {
'url': 'https://www.investing.com/equities/reliance-industries-technical',
'html_locations': default_html_locations,
},
'INFY': {
'url': 'https://www.investing.com/equities/infosys-technical',
'html_locations': default_html_locations,
},
'HDBK': {
'url': 'https://www.investing.com/equities/hdfc-bank-ltd-technical',
'html_locations': default_html_locations,
},
#### NASDAQ / NYSE
'RDFN': {
'url': 'https://www.investing.com/equities/redfin-technical',
'html_locations': default_html_locations,
},
'F': {
'url': 'https://www.investing.com/equities/ford-motor-co-technical',
'html_locations': default_html_locations,
},
'T': {
'url': 'https://www.investing.com/equities/at-t-technical',
'html_locations': default_html_locations,
},
'IRBT': {
'url': 'https://www.investing.com/equities/irobot-corp-technical',
'html_locations': default_html_locations,
},
}
if not filter:
return stocks
return {filter: stocks[filter]}
def generate_lists(stocks):
    # All four lists below have the same length.
    # For every index i, the element at position i of `STATIC_PAGE_LOCATIONS`
    # has the same length as the element at position i of `STATIC_PAGE_LOCATION_NAMES`.
STATIC_PAGE_SYMBOLS = []
STATIC_PAGE_URLS = []
STATIC_PAGE_LOCATIONS = []
STATIC_PAGE_LOCATION_NAMES = []
for symbol in sorted(list(stocks.keys())):
STATIC_PAGE_SYMBOLS.append(symbol)
STATIC_PAGE_URLS.append(stocks[symbol]['url'])
url_loc_names = []
url_locations = []
for loc_name in sorted(list(stocks[symbol]['html_locations'].keys())):
url_locations.append(stocks[symbol]['html_locations'][loc_name])
url_loc_names.append(loc_name)
STATIC_PAGE_LOCATIONS.append(url_locations)
STATIC_PAGE_LOCATION_NAMES.append(url_loc_names)
return STATIC_PAGE_SYMBOLS, STATIC_PAGE_URLS, STATIC_PAGE_LOCATIONS, STATIC_PAGE_LOCATION_NAMES
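# Example of the invariant above (illustrative): if `stocks` contains 'F' and 'T', then
# STATIC_PAGE_SYMBOLS == ['F', 'T'], STATIC_PAGE_URLS[i] is the URL for symbol i, and
# STATIC_PAGE_LOCATIONS[i][j] is the CSS selector list whose column name in the saved CSV
# is STATIC_PAGE_LOCATION_NAMES[i][j].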
def write_data(utc_time, data, STATIC_PAGE_SYMBOLS, STATIC_PAGE_URLS, STATIC_PAGE_LOCATIONS, STATIC_PAGE_LOCATION_NAMES, SAVE_DIR):
for idx in range(len(data)):
symbol = STATIC_PAGE_SYMBOLS[idx]
write_path = SAVE_DIR / f'{symbol}.csv'
raw_texts = data[idx]
names = STATIC_PAGE_LOCATION_NAMES[idx]
row = {'utc_time': [utc_time]}
for name, raw in zip(names, raw_texts):
row[name] = [raw]
mode, header = 'a', False
if not write_path.exists():
mode, header = 'w', True
| pd.DataFrame(row) | pandas.DataFrame |
# -*- encoding:utf-8 -*-
"""
中间层,从上层拿到x,y,df
拥有create estimator
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import functools
from enum import Enum
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, ClassifierMixin, RegressorMixin, clone
from sklearn import metrics
from sklearn.datasets import load_iris
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn.preprocessing import label_binarize, StandardScaler, binarize
from . import ABuMLExecute
from .ABuMLCreater import AbuMLCreater
from ..CoreBu import ABuEnv
from ..CoreBu.ABuFixes import train_test_split, cross_val_score, mean_squared_error_scorer, six
from ..UtilBu import ABuFileUtil
from ..UtilBu.ABuProgress import AbuProgress
from ..UtilBu.ABuDTUtil import warnings_filter
from ..UtilBu.ABuDTUtil import params_to_numpy
from ..CoreBu.ABuFixes import signature
__author__ = '阿布'
__weixin__ = 'abu_quant'
p_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir))
ML_TEST_FILE = os.path.join(p_dir, 'RomDataBu/ml_test.csv')
class _EMLScoreType(Enum):
"""针对有监督学习的度量支持enum"""
"""有监督学习度量准确率"""
E_SCORE_ACCURACY = 'accuracy'
"""有监督学习度量mse"""
E_SCORE_MSE = mean_squared_error_scorer
"""有监督学习度量roc_auc"""
E_SCORE_ROC_AUC = 'roc_auc'
class EMLFitType(Enum):
"""支持常使用的学习器类别enum"""
"""有监督学习:自动选择,根据y的label数量,> 10使用回归否则使用分类"""
E_FIT_AUTO = 'auto'
"""有监督学习:回归"""
E_FIT_REG = 'reg'
"""有监督学习:分类"""
E_FIT_CLF = 'clf'
"""无监督学习:HMM"""
E_FIT_HMM = 'hmm'
"""无监督学习:PCA"""
E_FIT_PCA = 'pca'
"""无监督学习:KMEAN"""
E_FIT_KMEAN = 'kmean'
def entry_wrapper(support=(EMLFitType.E_FIT_CLF, EMLFitType.E_FIT_REG, EMLFitType.E_FIT_HMM,
EMLFitType.E_FIT_PCA, EMLFitType.E_FIT_KMEAN)):
"""
类装饰器函数,对关键字参数中的fiter_type进行标准化,eg,fiter_type参数是'clf', 转换为EMLFitType(fiter_type)
赋予self.fiter_type,检测当前使用的具体学习器不在support参数中不执行被装饰的func函数了,打个log返回
:param support: 默认 support=(EMLFitType.E_FIT_CLF, EMLFitType.E_FIT_REG, EMLFitType.E_FIT_HMM,
EMLFitType.E_FIT_PCA, EMLFitType.E_FIT_KMEAN)
即支持所有,被装饰的函数根据自身特性选择装饰参数
"""
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
org_fiter_type = self.fiter_type
if 'fiter_type' in kwargs:
                # if a fiter_type kwarg was passed in, pop it out
fiter_type = kwargs.pop('fiter_type')
                # if the passed fiter_type is a str, e.g. 'clf', convert it to EMLFitType(fiter_type)
if isinstance(fiter_type, six.string_types):
fiter_type = EMLFitType(fiter_type)
self.fiter_type = fiter_type
check_support = self.fiter_type
if self.fiter_type == EMLFitType.E_FIT_AUTO:
                # resolve 'auto' to a concrete classification or regression type
check_y = self.y
if 'y' in kwargs:
check_y = kwargs['y']
check_support = EMLFitType.E_FIT_CLF if len(np.unique(check_y)) <= 10 else EMLFitType.E_FIT_REG
if check_support not in support:
                # the concrete learner in use is not in `support`: skip the decorated func, log and return
self.log_func('{} not support {}!'.format(func.__name__, check_support.value))
                # since the func was not executed, switch the type back
self.fiter_type = org_fiter_type
return
return func(self, *args, **kwargs)
return wrapper
return decorate
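# Example (sketch): a method decorated with @entry_wrapper(support=(EMLFitType.E_FIT_CLF,))
# accepts fiter_type='clf' as a keyword, converts it to EMLFitType.E_FIT_CLF before the call,
# and is skipped with a log message when the resolved learner type is not in `support`.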
# noinspection PyUnresolvedReferences
class AbuML(object):
"""封装有简单学习及无监督学习方法以及相关操作类"""
@classmethod
def create_test_fiter(cls):
"""
类方法:使用iris数据构造AbuML对象,测试接口,通过简单iris数据对方法以及策略进行验证
iris数据量小,如需要更多数据进行接口测试可使用create_test_more_fiter接口
eg: iris_abu = AbuML.create_test_fiter()
:return: AbuML(x, y, df),
eg: df
y x0 x1 x2 x3
0 0 5.1 3.5 1.4 0.2
1 0 4.9 3.0 1.4 0.2
2 0 4.7 3.2 1.3 0.2
3 0 4.6 3.1 1.5 0.2
4 0 5.0 3.6 1.4 0.2
.. .. ... ... ... ...
145 2 6.7 3.0 5.2 2.3
146 2 6.3 2.5 5.0 1.9
147 2 6.5 3.0 5.2 2.0
148 2 6.2 3.4 5.4 2.3
149 2 5.9 3.0 5.1 1.8
"""
iris = load_iris()
x = iris.data
"""
eg: iris.data
array([[ 5.1, 3.5, 1.4, 0.2],
[ 4.9, 3. , 1.4, 0.2],
[ 4.7, 3.2, 1.3, 0.2],
[ 4.6, 3.1, 1.5, 0.2],
[ 5. , 3.6, 1.4, 0.2],
....... ....... .......
[ 6.7, 3. , 5.2, 2.3],
[ 6.3, 2.5, 5. , 1.9],
[ 6.5, 3. , 5.2, 2. ],
[ 6.2, 3.4, 5.4, 2.3],
[ 5.9, 3. , 5.1, 1.8]])
"""
y = iris.target
"""
eg: y
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
"""
x_df = pd.DataFrame(x, columns=['x0', 'x1', 'x2', 'x3'])
y_df = pd.DataFrame(y, columns=['y'])
df = y_df.join(x_df)
return AbuML(x, y, df)
@classmethod
def load_ttn_raw_df(cls):
"""
        Read the Titanic test data.
        :return: pd.DataFrame object, from pd.read_csv(train_csv_path)
"""
train_csv_path = ML_TEST_FILE
if not ABuFileUtil.file_exist(train_csv_path):
            # raise RuntimeError if the Titanic data file does not exist
raise RuntimeError('{} not exist, please down a ml_test.csv!'.format(train_csv_path))
        # read the training file from disk with read_csv
return pd.read_csv(train_csv_path)
@classmethod
@warnings_filter
def create_test_more_fiter(cls):
"""
        Class method: build an AbuML object from the Titanic data; a test interface for validating methods and strategies with more data than iris.
        eg: ttn_abu = AbuML.create_test_more_fiter()
        :return: AbuML(x, y, df); the final Titanic data used to build AbuML looks like:
eg: df
Survived SibSp Parch Cabin_No Cabin_Yes Embarked_C Embarked_Q \
0 0 1 0 1 0 0 0
1 1 1 0 0 1 1 0
2 1 0 0 1 0 0 0
3 1 1 0 0 1 0 0
4 0 0 0 1 0 0 0
5 0 0 0 1 0 0 1
6 0 0 0 0 1 0 0
7 0 3 1 1 0 0 0
8 1 0 2 1 0 0 0
9 1 1 0 1 0 1 0
.. ... ... ... ... ... ... ...
Embarked_S Sex_female Sex_male Pclass_1 Pclass_2 Pclass_3 \
0 1 0 1 0 0 1
1 0 1 0 1 0 0
2 1 1 0 0 0 1
3 1 1 0 1 0 0
4 1 0 1 0 0 1
5 0 0 1 0 0 1
6 1 0 1 1 0 0
7 1 0 1 0 0 1
8 1 1 0 0 0 1
9 0 1 0 0 1 0
.. ... ... ... ... ... ...
Age_scaled Fare_scaled
0 -0.5614 -0.5024
1 0.6132 0.7868
2 -0.2677 -0.4889
3 0.3930 0.4207
4 0.3930 -0.4863
5 -0.4271 -0.4781
6 1.7877 0.3958
7 -2.0295 -0.2241
8 -0.1943 -0.4243
.. ... ...
"""
raw_df = cls.load_ttn_raw_df()
def set_missing_ages(p_df):
"""
            Fill the missing Age values in the data using a RandomForestRegressor
"""
from sklearn.ensemble import RandomForestRegressor
age_df = p_df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
known_age = age_df[age_df.Age.notnull()].as_matrix()
unknown_age = age_df[age_df.Age.isnull()].as_matrix()
y_inner = known_age[:, 0]
x_inner = known_age[:, 1:]
rfr_inner = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr_inner.fit(x_inner, y_inner)
predicted_ages = rfr_inner.predict(unknown_age[:, 1::])
p_df.loc[(p_df.Age.isnull()), 'Age'] = predicted_ages
return p_df, rfr_inner
def set_cabin_type(p_df):
"""
            Handle the missing Cabin values in the data
"""
p_df.loc[(p_df.Cabin.notnull()), 'Cabin'] = "Yes"
p_df.loc[(p_df.Cabin.isnull()), 'Cabin'] = "No"
return p_df
raw_df, rfr = set_missing_ages(raw_df)
raw_df = set_cabin_type(raw_df)
        # use get_dummies to binarize (one-hot encode) the multi-valued categorical columns
dummies_cabin = pd.get_dummies(raw_df['Cabin'], prefix='Cabin')
"""
eg:
data_train['Cabin']:
0 No
1 Yes
2 No
3 Yes
4 No
5 No
6 Yes
7 No
8 No
9 No
...
dummies_cabin:
Cabin_No Cabin_Yes
0 1 0
1 0 1
2 1 0
3 0 1
4 1 0
5 1 0
6 0 1
7 1 0
8 1 0
9 1 0
.. ... ...
"""
dummies__embarked = | pd.get_dummies(raw_df['Embarked'], prefix='Embarked') | pandas.get_dummies |
"""
Data Set Information: the "Online Retail II" dataset contains the sales of a UK-based online store between 01/12/2009 and 09/12/2011. Souvenirs make up much of the product catalog and can be considered promotional items; most of the company's customers are known to be wholesalers.
#Link to the Data Set: https://archive.ics.uci.edu/ml/datasets/Online+Retail+II
Column/Variable Information: InvoiceNo: Invoice number.
Unique to each transaction; if this code starts with C, the transaction was cancelled.
StockCode: Product code. Unique number for each product.
Description: Product name. Quantity: Number of units of the product sold on the invoice.
InvoiceDate: Invoice date and time.
UnitPrice: Product price (in GBP)
CustomerID: Unique customer number
Country: The name of the country where the customer lives.
Aim: identify customer behaviours relevant to the business problem and cluster customers into groups according to those behaviours.
Clue:
Customers who show common behaviours end up in the same group; feedback on sales and marketing techniques is then developed specifically for these groups.
#
PROJECT: Customer Segmentation with RFM
#
An e-commerce company wants to segment its customers and determine marketing strategies according to these segments.
Apply RFM analysis to sheet named "Year 2010-2011" of online_retail_II.xlsx data set.
Where is the dataset? Download the "online_retail_II.xlsx" file at the address below.
https://www.kaggle.com/nathaniel/uci-online-retail-ii-data-set or
https://archive.ics.uci.edu/ml/machine-learning-databases/00502/
"""
#TASK 1: Simulate Exploratary Data Analysis / Data Understanding
# EDA ***********************************************************************************************************
import datetime as dt
import pandas as pd
pd.set_option('display.max_columns',None)
df_=pd.read_excel("online_retail_II.xlsx",sheet_name="Year 2010-2011")
df=df_.copy()
df.head()
df.isnull().sum()
df.info()
# What is the number of unique products?
df["Description"].nunique()
# How many times does each product appear in the data?
df["Description"].value_counts().head()
# Which products are ordered the most? Show them in descending order of total quantity.
df.groupby("Description").agg({"Quantity":"sum"}).sort_values("Quantity",ascending=False).head()
# How many invoices have been issued in total?
df["Invoice"].nunique()
# How much money was earned on average per invoice?
# (a new variable is needed, created by multiplying Quantity and Price)
# First recreate the df with the returned items (invoices starting with "C") removed
df=df[~df["Invoice"].str.contains("C",na=False)]
df.isnull().sum()
df["TotalPrice"]=df["Price"]*df["Quantity"]
df["TotalPrice"].head()
# What are the most expensive products?
df.sort_values("Price",ascending=False).head()
# How many orders came from each country?
df["Country"].value_counts().head()
# Show each country's earnings:
df.groupby("Country").agg({"TotalPrice": "sum"}).sort_values("TotalPrice", ascending=False).head()
###############################################################
# Data Preparation
###############################################################
df.isnull().sum()
df.dropna(inplace=True)
df.describe([0.01,0.05,0.1,0.25,0.5,0.75,0.90,0.95,0.99]).T
###############################################################
# Calculating RFM Metrics
###############################################################
# Recency, Frequency, Monetary
# Recency: Time from customer's last purchase.
# In other words, it is "the time since the last contact of the customer".
# So Recency = reference date - date of the customer's last purchase (computed below with a lambda function).
# We take the latest invoice date in the data to set the reference,
# but add 1-2 days to it so that customers who shopped on that last day do not get a Recency of 0.
# Here the last date in the data is 2011-12-09, so the reference is set to 2011-12-11 and stored in the today_date variable.
df["InvoiceDate"].max()
today_date=dt.datetime(2011,12,11)
# Let's create the RFM table. The RFM values are computed per "Customer ID":
# Recency comes from the days since the latest InvoiceDate, Frequency from the number of
# invoice rows (len() of the "Invoice" column), and Monetary from the sum of "TotalPrice", all via lambda functions.
rfm=df.groupby("Customer ID").agg({"InvoiceDate":lambda date : (today_date-date.max()).days, "Invoice":lambda num: len(num),
"TotalPrice": lambda TotalPrice: TotalPrice.sum()})
# Rename the columns to "Recency", "Frequency", "Monetary"
rfm.columns=["Recency","Frequency","Monetary"]
# Sanity check: for valid transactions both Monetary and Frequency should be greater than 0,
# so keep only such rows and drop purchases whose "TotalPrice" ended up empty.
rfm = rfm[(rfm["Monetary"] > 0) & (rfm["Frequency"] > 0)]
rfm
###############################################################
# Calculating RFM Scores
###############################################################
# Recency
rfm["RecencyScore"] = pd.qcut(rfm["Recency"],5,labels=[5,4,3,2,1])
rfm["FrequencyScore"] = | pd.qcut(rfm["Frequency"],5,labels=[1,2,3,4,5]) | pandas.qcut |
#%%
import json
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.colors as colors
from matplotlib import ticker
from utils.libfunctions import *
def replace_at_index1(tup, ix, val):
lst = list(tup)
for i in range(0, len(ix)):
lst[ix[i]] = val[i]
return tuple(lst)
class plotSEA():
def __init__(self, eventKind :str, eventType :list,
fluxType :str, geomagModel :str) -> None:
self.eventKind = eventKind
self.eventType = eventType
self.fluxType = fluxType
self.geomagModel = geomagModel
self.eventsDicts = {'CME': 'Interplanetary Coronal Mass Ejection',
'HSS': 'High-Speed Streams'}
def plotFluxOmni(self, jsonDirectory, cutdays,
xlimFlux, ylimFlux, savePLot,
outputDir, plotParameters):
self.plotParameters = plotParameters
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Palatino']})
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=26, family='serif')
figprops = dict(nrows=len(self.plotParameters['param'])+1, ncols=len(self.eventType),
constrained_layout=True, figsize=(21, 26),dpi=120)
fig, axes = plt.subplots(**figprops)
fig.suptitle(f'{self.eventsDicts[self.eventKind]}',va='center', fontsize=35)
fig.supxlabel('Epoch [hours]', va='center', fontsize=35)
for ee in range(len(self.eventType)):
if self.fluxType == 'Flux':
with open(f'{jsonDirectory}{self.eventKind}_FluxEnhRed2_AB_Lst_asfreq.json', 'r') as f:
self.dataFlux = json.load(f)
else:
with open(f'{jsonDirectory}{self.eventKind}_PhsdEnhRed2_AB_Lst_asfreq.json', 'r') as f:
dataFlux = json.load(f)
with open(f'{jsonDirectory}{self.eventKind}_{self.eventType[ee]}_electronFluxCut_Omni.json',
'r') as f:
data = json.load(f)
with open(f'{jsonDirectory}{self.eventKind}_{self.eventType[ee]}_CutKp.json',
'r') as f:
dataKP = json.load(f)
with open(f'{jsonDirectory}{self.eventKind}_{self.eventType[ee]}_electronFluxCut_LCDS.json',
'r') as f:
dataLcds = json.load(f)
dates = list(data.keys())
datesKP = list(dataKP.keys())
datesLcds = list(dataLcds.keys())
parameters = list(data[dates[0]]['data'].keys())
parametersKP = list(dataKP[datesKP[0]]['data'].keys())
parametersLcds = list(dataLcds[datesLcds[0]]['data'].keys())
tt = | pd.to_datetime(data[dates[0]]['time']) | pandas.to_datetime |
from functools import lru_cache
from os.path import join
from pathlib import Path
import mne
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from alice_ml.utils import get_epochs_from_df
class IC:
"""
A wrapper that represents the independent component. Contains the signal, weights of channels and the sampling frequency.
"""
def __init__(self, freq, signal=None, weights=None, signal_path=None, weights_path=None):
"""
If signal is None, signal_path must be set. If weights is None, weights_path must be set
Setting signal_path and weights_path allows to use dynamic data loading with lru_cache. It is useful when your dataset is large.
Args
freq: Sampling frequency
"""
if (signal is None and signal_path is None):
raise ValueError('signal or signal_path must be provided')
if (weights is None and weights_path is None):
raise ValueError('signal or signal_path must be provided')
self._signal = signal
self._weights = weights
self._weights_path = weights_path
self._signal_path = signal_path
self.freq = freq
@property
@lru_cache(maxsize=10)
def weights(self):
if self._weights is None:
return self._read_weights()
return self._weights
@property
@lru_cache(maxsize=10)
def signal(self):
if self._signal is None:
return self._read_signal()
return self._signal
def select_weights(self, channels):
return self.weights[self.weights.index.isin(channels)]
@lru_cache(maxsize=10)
def psd(self, **kwargs):
epochs = get_epochs_from_df(self.signal, self.freq)
powers, freqs = mne.time_frequency.psd_multitaper(epochs, picks=[0], **kwargs)
return freqs, powers.mean(axis=1)
def plot_psd(self, returns=False):
fig = plt.figure()
freqs, powers = self.psd(verbose=False)
plt.fill_between(freqs, powers.mean(axis=0) - powers.std(axis=0), powers.mean(axis=0) + powers.std(axis=0), alpha=0.2)
plt.semilogy(freqs, powers.mean(axis=0))
if returns:
return fig
def plot_topomap(self, returns=False):
fig, ax = plt.subplots()
outlines = 'head'
res = 64
contours = 6
sensors = True
image_interp = 'bilinear'
show = True
extrapolate = 'box'
border = 0
ten_twenty_montage = mne.channels.make_standard_montage('standard_1020')
ten_twenty_montage_channels = {ch.lower(): ch for ch in ten_twenty_montage.ch_names}
# get channels in format of ten_twenty_montage in right order
channels_to_use_ = [ten_twenty_montage_channels[ch] for ch in self.weights.index]
# create Info object to store info
info = mne.io.meas_info.create_info(channels_to_use_, sfreq=256, ch_types="eeg")
# using temporary RawArray to apply mongage to info
mne.io.RawArray(np.zeros((len(channels_to_use_), 1)), info, copy=None, verbose=False).set_montage(ten_twenty_montage)
# pick channels
channels_to_use_ = [ch for ch in info.ch_names if ch.lower() in self.weights.index]
info.pick_channels(channels_to_use_)
_, pos, _, names, _, sphere, clip_origin = mne.viz.topomap._prepare_topomap_plot(info, 'eeg')
outlines = mne.viz.topomap._make_head_outlines(sphere, pos, outlines, clip_origin)
mne.viz.topomap.plot_topomap(
self.weights, pos, res=res,
outlines=outlines, contours=contours, sensors=sensors,
image_interp=image_interp, show=show, extrapolate=extrapolate,
sphere=sphere, border=border, axes=ax, names=names
)
if returns:
return fig
def _read_weights(self):
if self._weights_path is None:
raise RuntimeError('weights_path is None')
return pd.read_csv(self._weights_path, index_col='ch_name')['value'].rename('weights')
def _read_signal(self):
if self._signal_path is None:
raise RuntimeError('weights_path is None')
return pd.read_csv(self._signal_path).groupby('epoch')['value'].apply(np.array).rename('signal')
def read_ic(dir, ics, ic_id, preload=True):
path = Path(dir)
signal_path = join(path, f'{ic_id}_data.csv')
weights_path = join(path, f'{ic_id}_weights.csv')
freq = ics.loc[ics['ic_id'] == ic_id, 'sfreq']
if preload is True:
signal = pd.read_csv(signal_path).groupby('epoch')['value'].apply(np.array).rename('signal')
weights = pd.read_csv(weights_path, index_col='ch_name')['value'].rename('weights')
return IC(freq, signal=signal, weights=weights)
else:
return IC(freq, signal_path=signal_path, weights_path=weights_path)
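# Usage sketch (names assumed): given a metadata frame `ics` with 'ic_id' and 'sfreq'
# columns, read_ic('data', ics, some_id, preload=False) defers reading the CSVs until
# .signal or .weights is first accessed, after which lru_cache keeps them in memory.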
def load_dataset(dir='data', preload=True):
path = Path(dir)
ics = | pd.read_csv(path/'ics.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
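# (Reading the "X | Y" rule comments above: the outcome before "|" applies when the
#  condition holds and the one after applies otherwise; "Select" asserts membership in
#  the group, "Reject" asserts non-membership, and "Next" defers to the following rule.)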
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
if pd.isnull(row["astrxm1_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM2 <> NULL | Next | Reject
if pd.isnull(row["astrxm2_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM3 <> NULL | Select | Reject
if pd.notnull(row["astrxm3_dat"]):
assert row["ast_group"]
else:
assert not row["ast_group"]
def test_cns_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CNS_COV_DAT <> NULL | Select | Reject
if | pd.notnull(row["cns_cov_dat"]) | pandas.notnull |
import numpy as np
np.random.seed(42)
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import initializer
from chainer.initializers import Normal
from time import time
import pandas as pd
eps = 1e-8
def phi(obs):
"""
Feature extraction function
"""
xp = chainer.cuda.get_array_module(obs)
obs = xp.expand_dims(obs, 0)
return obs.astype(np.float32)
def batch_states(states, xp, phi):
"""The default method for making batch of observations.
Args:
states (list): list of observations from an environment.
xp (module): numpy or cupy
phi (callable): Feature extractor applied to observations
Return:
the object which will be given as input to the model.
"""
states = [phi(s) for s in states]
return xp.asarray(states)
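# e.g. (sketch) phi(np.zeros((3, 50))) returns a float32 array of shape (1, 3, 50), and
# batch_states([obs_a, obs_b], np, phi) stacks those per-observation arrays into a single
# numpy array of shape (2, 1, 3, 50) ready to be fed to the model.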
class LeCunNormal(initializer.Initializer):
"""Initializes array with scaled Gaussian distribution.
Each element of the array is initialized by the value drawn
independently from Gaussian distribution whose mean is 0,
and standard deviation is
:math:`scale \\times \\sqrt{\\frac{1}{fan_{in}}}`,
where :math:`fan_{in}` is the number of input units.
Reference: LeCun 98, Efficient Backprop
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
Args:
scale (float): A constant that determines the scale
of the standard deviation.
dtype: Data type specifier.
"""
def __init__(self, scale=1.0, dtype=None):
self.scale = scale
super(LeCunNormal, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype
fan_in, fan_out = initializer.get_fans(array.shape)
s = self.scale * np.sqrt(1. / fan_in)
Normal(s)(array)
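# e.g. for a weight matrix with fan_in = 100 inputs and scale = 1.0, each entry is drawn
# from N(0, 0.1**2), since 1.0 * sqrt(1 / 100) = 0.1.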
class ProcessObs(chainer.Link):
"""
Observations preprocessing / feature extraction layer
"""
def __init__(self):
super().__init__()
# with self.init_scope():
# self.bn = L.BatchNormalization(self.out_channels)
def __call__(self, x):
xp = chainer.cuda.get_array_module(x)
obs = []
for i in [i for i in range(int(x.shape[-1]) - 1) if i % 6 == 0]:
pair = []
pair.append(xp.expand_dims(x[:,:,:, i + 1] / (x[:,:,:, i] + eps) - 1., -2))
pair.append(xp.expand_dims(x[:,:,:, i + 2] / (x[:,:,:, i] + eps) - 1., -2))
pair.append(xp.expand_dims(x[:,:,:, i + 3] / (x[:,:,:, i] + eps) - 1., -2))
obs.append(xp.concatenate(pair, axis=1))
# shape[batch_size, features, n_pairs, timesteps]
# return self.bn(xp.concatenate(obs, axis=-2))
return xp.concatenate(obs, axis=-2)
class PortfolioVector(chainer.Link):
def __init__(self):
super().__init__()
def __call__(self, x):
n_cols = int(x.shape[-1])
n_pairs = int((n_cols - 1) / 6)
xp = chainer.cuda.get_array_module(x)
cv = np.zeros((1, n_pairs))
for i, j in enumerate([i - 1 for i in range(1, n_cols) if (i % 6) == 0]):
cv[0, i] = xp.expand_dims(x[:,:,-1, j] * x[:,:,-1, j - 2], -1)
return chainer.Variable(xp.reshape(xp.concatenate(cv / (cv.sum() + x[:,:,-1, n_cols - 1]), axis=-1),
[-1,1,n_pairs,1]))
class CashBias(chainer.Link):
"""
Write me
"""
def __init__(self):
super().__init__()
def __call__(self, x):
xp = chainer.cuda.get_array_module(x)
fiat = xp.zeros([x.shape[0], x.shape[1], 1, 1], dtype='f') - F.sum(x, axis=2, keepdims=True)
return F.concat([x, fiat], axis=-2)
class ConvBlock(chainer.Chain):
"""
Write me
"""
def __init__(self, in_channels, out_channels, ksize, pad=(0,0)):
super().__init__()
with self.init_scope():
self.conv = L.Convolution2D(in_channels, out_channels, ksize, pad=pad,
nobias=False, initialW=LeCunNormal())
self.bn = L.BatchNormalization(out_channels)
def __call__(self, x):
h = self.conv(x)
h = self.bn(h)
return F.relu(h)
class VisionModel(chainer.Chain):
"""
Write me
"""
def __init__(self, timesteps, vn_number, pn_number):
super().__init__()
with self.init_scope():
self.obs = ProcessObs()
self.filt1 = ConvBlock(3, vn_number, (1, 3), (0, 1))
self.filt2 = ConvBlock(3, vn_number, (1, 5), (0, 2))
self.filt3 = ConvBlock(3, vn_number, (1, 7), (0, 3))
self.filt4 = ConvBlock(3, vn_number, (1, 9), (0, 4))
self.filt_out = ConvBlock(vn_number * 4, pn_number, (1, timesteps), (0, 0))
def __call__(self, x):
h = self.obs(x)
h = F.concat([self.filt1(h), self.filt2(h), self.filt3(h), self.filt4(h)], axis=1)
return self.filt_out(h)
class EIIE(chainer.Chain):
"""
Write me
"""
def __init__(self, timesteps, vn_number, pn_number):
super().__init__()
with self.init_scope():
self.vision = VisionModel(timesteps, vn_number, pn_number)
# self.portvec = PortfolioVector(input_shape)
self.conv = L.Convolution2D(pn_number, 1, 1, 1, nobias=False, initialW=LeCunNormal())
# self.cashbias = CashBias()
def __call__(self, x):
h = self.vision(x)
# h = F.concat([h, self.portvec(x)], axis=1)
h = self.conv(h)
# h = self.cashbias(h)
return F.tanh(h)
def predict(self, obs):
obs = batch_states([obs[:-1].values], chainer.cuda.get_array_module(obs), phi)
return np.append(self.__call__(obs).data.ravel(), [0.0])
def set_params(self, **kwargs):
pass
# Train functions
def get_target(obs, target_type):
n_cols = int(obs.shape[-1])
n_pairs = int((n_cols - 1) / 6)
target = np.zeros((1, n_pairs))
for i, j in enumerate([i for i in range(n_cols - 1) if i % 6 == 0]):
target[0, i] = np.expand_dims(obs[j + 3] / (obs[j] + 1e-8) - 1., -1)
    if target_type in ('regression', 'regressor'):
        return target
    elif target_type in ('classifier', 'classification'):
return np.sign(target)
else:
raise TypeError("Bad target_type params.")
def make_train_batch(env, batch_size, target_type):
obs_batch = []
target_batch = []
for i in range(batch_size):
# Choose some random index
env.index = np.random.randint(high=env.data_length, low=env.obs_steps)
# Give us some cash
env.portfolio_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Medical Cost Personal Datasets.
# ## Objectives.
# 1. Preprocess and clean the data.
# 2. Perform Statistical Analysis of the data.
# 3. Perform Linear Regression to predict charges.
# 4. Perform Logistic Analysis to predict if a person is a smoker or not.
# 5. Perform SVM and predict if a person is a smoker or not.
# 6. Perform Boosting algorithms and predict if a person is a smoker or not.
# In[ ]:
# Hide Warnings.
import warnings
warnings.filterwarnings('ignore')
# Import pandas.
import pandas as pd
# Import numpy.
import numpy as np
# Import matplotlib.
import matplotlib.pyplot as plt
# Import Seaborn.
import seaborn as sns
# Read and display the data.
data = | pd.read_csv("../../../input/mirichoi0218_insurance/insurance.csv") | pandas.read_csv |
import pickle
import pandas as pd
import numpy as np
def load_data():
train_data = {}
file_path = '../data/tiny_train_input.csv'
data = pd.read_csv(file_path, header=None)
data.columns = ['c' + str(i) for i in range(data.shape[1])]
label = data.c0.values
label = label.reshape(len(label), 1)
train_data['y_train'] = label
co_feature = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sep
@author: CocoLiao
Topic: NEC_system_PathDist_module
Input ex:
Run_TotalSites('D:\\nec-backend\\dist\\docs\\mrData.xlsx',
'D:\\nec-backend\\dist\\docs\\workerData.xlsx',
'D:\\nec-backend\\dist\\docs\\officeAddress.xlsx',
'D:\\nec-backend\\dist\\docs\\taxiCost.xlsx',
30, 800.0, 6.0, 4.0, 2.42)
"""
# packages import
import pandas as pd
import numpy as np
import googlemaps
from gurobipy import *
import time
def Run_TotalSites(Service_FN, Worker_FN, Office_FN, TXcost_FN, workTime_buffer, PC_basicMilleage, PC_belowCost, PC_upperCost, CC_avgCost):
'''
<input>
Service_FN: string [file path] # NEC_MRDATA_original
Worker_FN: string [file path] # NEC_workerDATA
Office_FN: string [file path] # TW_sites_address
TXcost_FN: string [file path] # TW_TXcars_cost
workTime_buffer: int # works_between_buffer_mins
PC_basicMilleage: float # private_car_monthly_basic_Mileage_km
PC_belowCost: float # private_car_below_basicM_fuel_cost_$/km
PC_upperCost: float # private_car_below_basicM_fuel_cost_$/km
CC_avgCost: float # company_car_fuel_cost_$/km
<output>
PriceSens_final_df: dataframe (table)
'''
    tStart = time.time()  # start timing
###### MODULE ONE: PathDist.py################################################################
def PathDist(Service_File, Worker_File, Office_File, office_EGnm):
'''
<input>
Service_File: string [file path] # NEC_MRDATA_original
Worker_File: string [file path] # NEC_workerDATA
Office_File: string [file path] # NEC_TW_sites_address
office: string
<output>
(site)_PathDist_detail: dataframe (table), original MRDATA resort with path labeled
(site)_PathDist_analy: dataframe (table), each uniquePath info with Out_date, Path_ID/order, MoveDist_GO/BACK/TOL, Begin/End_time columns
'''
# read original MR_DATA files
Service_Data = pd.read_excel(Service_File)
Worker_Data = pd.read_excel(Worker_File)
Office_Data = pd.read_excel(Office_File)
# match serciceDATA and workerDATA
Worker_Data = Worker_Data.drop(['person_nm', 'actgr_nm'], axis = 1)
Data = pd.merge(Service_Data, Worker_Data, on='case_no')
# office select and resort --> PathData
office = Office_Data.loc[Office_Data.actgr == office_EGnm]['actgr_office'].item()
office_addr = Office_Data.loc[Office_Data.actgr_office == office]['actgr_address'].item()
office_nm = Office_Data.loc[Office_Data.actgr_office == office]['actgr_name'].item()
loc_Data = Data[Data['actgr_nm'] == office_nm]
loc_Data['out_day'] = (pd.to_datetime(loc_Data.out_dt)).dt.date
loc_Data['out_dt_secs'] = pd.to_timedelta(loc_Data['out_dt']).dt.total_seconds()
loc_Data['back_dt_secs'] = pd.to_timedelta(loc_Data['back_dt']).dt.total_seconds()
loc_Data_resort = loc_Data.sort_values(['out_day','person_id','out_dt_secs'], ascending=[True,True,True])
# remove null value row by address column
loc_Data_resort = loc_Data_resort[~loc_Data_resort['comp_address'].isnull()]
loc_Data_resort = loc_Data_resort[~loc_Data_resort['out_dt'].isnull()]
# read loc_custDist data
custDist_file = '../docs/loc_CustAddr_Dist/' + office_EGnm + '_df_custAddr_dist.xlsx'
custDist_Data = | pd.read_excel(custDist_file, index_col=0) | pandas.read_excel |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # an all-masked float frame is all-NaN, so it never compares equal to itself
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = | DataFrame(arr) | pandas.DataFrame |
# <NAME> (<EMAIL>)
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import scipy.stats as ss
import mlpaper.boot_util as bu
from mlpaper.constants import METHOD, METRIC, PAIRWISE_DEFAULT, STAT, STD_STATS
from mlpaper.util import clip_chk
N_BOOT = 1000 # Default number of bootstrap replications
# ============================================================================
# Statistical util functions
# ============================================================================
def clip_EB(mu, EB, lower=-np.inf, upper=np.inf, min_EB=0.0):
"""Clip error bars to both a minimum uncertainty level and a maximum level
    determined by trivial error bars from the a priori known limits of the
unknown parameter `theta`. Similar to `np.clip`, but for error bars.
Parameters
----------
mu : float
Point estimate of unknown parameter `theta` around which error bars are
based.
EB : float
Size of error bar around `mu` (``EB > 0``). The confidence interval on
`theta` is ``[mu - EB, mu + EB]``.
lower : float
A priori known theoretical lower limit on unknown parameter `theta`.
For instance, for mean zero-one loss, ``lower=0``.
upper : float
A priori known theoretical upper limit on unknown parameter `theta`.
For instance, for mean zero-one loss, ``upper=1``.
min_EB : float
        Minimum believable size of the error bar. Typically, leave
``min_EB=0`` for simplicity.
Returns
-------
EB : float
Error bar after possible clipping.
"""
assert np.ndim(mu) == 0 and np.ndim(EB) == 0
assert np.ndim(lower) == 0 and np.ndim(upper) == 0
assert upper - lower >= 0.0 # Also catch (inf, inf) or nans
assert np.ndim(min_EB) == 0
assert 0.0 <= min_EB and min_EB < np.inf
# Note: These conditions are designed to pass when NaNs are supplied.
if lower > mu or mu > upper:
raise ValueError("mu %f outside of given limits (%f, %f)" % (mu, lower, upper))
if 2 * min_EB > upper - lower:
raise ValueError("min error bar %f too small for limits (%f, %f)" % (min_EB, lower, upper))
with np.errstate(invalid="ignore"): # expect non-finite here
EB_trivial = np.fmax(upper - mu, mu - lower)
assert not (min_EB > EB_trivial) # Let NaNs pass
EB = np.clip(EB, min_EB, EB_trivial)
return EB
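# --- Illustrative usage sketch (not part of the original module). For a metric
# known to lie in [0, 1], an oversized error bar is truncated to the trivial bar
# implied by the limits, while a tiny error bar is raised to min_EB. The numbers
# below are made up purely for illustration.
def _demo_clip_EB():
    capped = clip_EB(0.5, 2.0, lower=0.0, upper=1.0)  # -> 0.5 (trivial bar)
    raised = clip_EB(0.5, 1e-6, lower=0.0, upper=1.0, min_EB=0.01)  # -> 0.01
    return capped, raised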
def t_test(x):
"""Perform a standard t-test to test if the values in `x` are sampled from
a distribution with a zero mean.
Parameters
----------
x : array-like, shape (n_samples,)
array of data points to test.
Returns
-------
pval : float
p-value (in [0,1]) from t-test on `x`.
"""
assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))
if (len(x) <= 1) or (not np.all(np.isfinite(x))):
return 1.0 # Can't say anything about scale => p=1
_, pval = ss.ttest_1samp(x, 0.0)
if np.isnan(pval):
# Should only be possible if scale underflowed to zero:
assert np.var(x, ddof=1) <= 1e-100
# It is debatable if the condition should be ``np.mean(x) == 0.0`` or
# ``np.all(x == 0.0)``. Should not matter in practice.
        pval = float(np.mean(x) == 0.0)
assert 0.0 <= pval and pval <= 1.0
return pval
def t_EB(x, confidence=0.95):
"""Get t statistic based error bars on mean of `x`.
Parameters
----------
x : array-like, shape (n_samples,)
Data points to estimate mean. Must not be empty or contain NaNs.
confidence : float
Confidence probability (in (0, 1)) to construct confidence interval
from t statistic.
Returns
-------
EB : float
Size of error bar on mean (>= 0). The confidence interval is
``[mean(x) - EB, mean(x) + EB]``. `EB` is inf when ``len(x) <= 1``.
"""
assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))
assert np.ndim(confidence) == 0
assert 0.0 < confidence and confidence < 1.0
N = len(x)
if (N <= 1) or (not np.all(np.isfinite(x))):
return np.inf
# loc cancels out when we just want EB anyway
LB, UB = ss.t.interval(confidence, N - 1, loc=0.0, scale=1.0)
assert not (LB > UB)
# Just multiplying scale=ss.sem(x) is better for when scale=0
EB = 0.5 * ss.sem(x) * (UB - LB)
assert np.ndim(EB) == 0 and EB >= 0.0
return EB
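# --- Minimal usage sketch (not part of the original module). Both t_test and
# t_EB expect a 1-d array of i.i.d. observations; the data here are random
# numbers purely for illustration.
def _demo_t_stats(n=50, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.randn(n) + 0.1  # synthetic observations with a small true mean
    EB = t_EB(x, confidence=0.95)  # half-width of the 95% CI on mean(x)
    pval = t_test(x)               # H0: the mean of x is zero
    return EB, pval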
def bernstein_test(x, lower, upper):
"""Perform Bernstein bound-based test to test if the values in `x` are
sampled from a distribution with a zero mean. This test makes no
distributional or central limit theorem assumption on `x`.
As a result the bound may be loose and the p-value will not be sampled from
a uniform distribution under H0 (E[x] = 0), but rather be skewed larger
than uniform.
Parameters
----------
x : array-like, shape (n_samples,)
array of data points to test.
lower : float
A priori known theoretical lower limit on unknown mean. For instance,
for mean zero-one loss, ``lower=0``.
upper : float
A priori known theoretical upper limit on unknown mean. For instance,
for mean zero-one loss, ``upper=1``.
Returns
-------
pval : float
        p-value (in [0,1]) from the Bernstein bound test on `x`.
"""
assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))
assert np.ndim(lower) == 0 and np.ndim(upper) == 0
range_ = upper - lower
assert range_ >= 0.0 # Also catch (inf, inf) or nans
assert np.all(lower <= x) and np.all(x <= upper)
if (len(x) <= 1) or (not np.all(np.isfinite(x))):
return 1.0 # Can't say anything about scale => p=1
if (range_ == 0.0) or (range_ == np.inf):
# If range_ = inf, we could use p=0, if 0 is outside of [lower, upper],
# but it is unclear if there is any advantage to the extra hassle.
# If range_ = 0, then roots not invertible and distn on data x is a
# point mass => everything has p=1.
return 1.0
# Get the moments
N = len(x)
mu = np.mean(x)
std = np.std(x, ddof=0)
coef = [(3.0 * range_) / N, std * np.sqrt(2.0 / N), -np.abs(mu)]
assert np.all(np.isfinite(coef)) # Should have caught non-finite cases
coef_roots = np.roots(coef)
assert len(coef_roots) == 2
assert coef_roots.dtype.kind == "f" # Appears roots are always real
    # Appears to always be one neg and one pos root, but we are looking for the
    # square root, so the positive one is the correct one. The roots can be zero.
assert np.sum(coef_roots <= 0.0) >= 1
assert np.sum(coef_roots >= 0.0) >= 1
B = np.max(coef_roots) ** 2 # Bernstein test statistic
# Sampling CDF is bounded by exponential for any true distn on x.
delta = 3.0 * np.exp(-B)
pval = np.minimum(1.0, delta) # Can cap at 1 to make p-value
assert 0.0 <= pval and pval <= 1.0
return pval
def bernstein_EB(x, lower, upper, confidence=0.95):
"""Get Bernstein bound based error bars on mean of `x`. This error bar
makes no distributional or central limit theorem assumption on `x`.
Parameters
----------
x : array-like, shape (n_samples,)
Data points to estimate mean. Must not be empty or contain NaNs.
lower : float
A priori known theoretical lower limit on unknown mean. For instance,
for mean zero-one loss, ``lower=0``.
upper : float
A priori known theoretical upper limit on unknown mean. For instance,
for mean zero-one loss, ``upper=1``.
confidence : float
Confidence probability (in (0, 1)) to construct confidence interval
from t statistic.
Returns
-------
EB : float
Size of error bar on mean (>= 0). The confidence interval is
        ``[mean(x) - EB, mean(x) + EB]``. ``EB = upper - lower`` (which may be
        inf) when ``len(x) <= 1``.
Notes
-----
This does not do clipping of to trivial error bars, i.e., `EB` could be
larger than ``upper - lower``. However, `clip_EB` can be called to enforce
trivial error bar limits.
References
----------
Audibert, Jean-Yves, <NAME>, and <NAME>.
"Exploration-exploitation tradeoff using variance estimates in multi-armed
bandits." Theoretical Computer Science 410.19 (2009): 1876-1902.
"""
assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))
assert np.ndim(lower) == 0 and np.ndim(upper) == 0
range_ = upper - lower
assert range_ >= 0.0 # Also catch (inf, inf) or nans
assert np.all(lower <= x) and np.all(x <= upper)
assert np.ndim(confidence) == 0
assert 0.0 < confidence and confidence < 1.0
N = x.size
if (N <= 1) or (not np.all(np.isfinite(x))):
return range_
# From Thm 1 of Audibert et. al. (2009), must use MLE for std ==> ddof=0
delta = 1.0 - confidence
A = np.log(3.0 / delta)
EB = np.std(x, ddof=0) * np.sqrt((2.0 * A) / N) + (3.0 * A * range_) / N
assert np.ndim(EB) == 0 and EB >= 0.0
return EB
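# --- Rough comparison sketch (not part of the original module). On bounded data
# the Bernstein bar requires no distributional assumption, so it is typically
# wider than the t-based bar at the same confidence. Synthetic [0, 1) data for
# illustration only.
def _demo_bernstein_vs_t(n=200, seed=1):
    rng = np.random.RandomState(seed)
    x = rng.uniform(0.0, 1.0, size=n)
    return bernstein_EB(x, 0.0, 1.0, confidence=0.95), t_EB(x, confidence=0.95)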
def _boot_EB_and_test(x, confidence=0.95, n_boot=N_BOOT, return_EB=True, return_test=True, return_CI=False):
"""Internal helper function to compute both bootstrap EB and significance
using the same random bootstrap weights, which saves computation and
guarantees the results are coherent with each other."""
assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))
# confidence is checked by bu.error_bar
N = x.size
if (N <= 1) or (not np.all(np.isfinite(x))):
return np.inf, 1.0, (-np.inf, np.inf)
weight = bu.boot_weights(N, n_boot)
mu_boot = np.mean(x * weight, axis=1)
pval = bu.significance(mu_boot, ref=0.0) if return_test else 1.0
EB = np.inf
if return_EB:
mu = np.mean(x)
EB = bu.error_bar(mu_boot, mu, confidence=confidence)
# Useful in test:
CI = -np.inf, np.inf
if return_CI:
CI = bu.percentile(mu_boot, confidence=confidence)
return EB, pval, CI
def boot_test(x, n_boot=N_BOOT):
"""Perform a bootstrap-based test to test if the values in `x` are sampled
from a distribution with a zero mean.
Parameters
----------
x : array-like, shape (n_samples,)
array of data points to test.
n_boot : int
Number of bootstrap iterations to perform.
Returns
-------
pval : float
        p-value (in [0,1]) from the bootstrap test on `x`.
"""
_, pval, _ = _boot_EB_and_test(x, n_boot=n_boot, return_EB=False, return_test=True)
assert 0.0 <= pval and pval <= 1.0
return pval
def boot_EB(x, confidence=0.95, n_boot=N_BOOT):
"""Get bootstrap bound based error bars on mean of `x`.
Parameters
----------
x : array-like, shape (n_samples,)
Data points to estimate mean. Must not be empty or contain NaNs.
confidence : float
Confidence probability (in (0, 1)) to construct confidence interval
from t statistic.
n_boot : int
Number of bootstrap iterations to perform.
Returns
-------
EB : float
Size of error bar on mean (>= 0). The confidence interval is
``[mean(x) - EB, mean(x) + EB]``. `EB` is inf when ``len(x) <= 1``.
"""
EB, _, _ = _boot_EB_and_test(x, confidence=confidence, n_boot=n_boot, return_EB=True, return_test=False)
assert np.ndim(EB) == 0 and EB >= 0.0
return EB
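# --- Usage sketch (not part of the original module). boot_EB and boot_test each
# draw their own bootstrap weights, so calling both separately does the
# resampling twice; _boot_EB_and_test shares the weights when both are needed.
# Synthetic data for illustration only.
def _demo_boot(n=100, seed=2):
    rng = np.random.RandomState(seed)
    x = rng.randn(n)  # synthetic observations
    return boot_EB(x, confidence=0.95, n_boot=500), boot_test(x, n_boot=500)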
def get_mean_and_EB(x, confidence=0.95, min_EB=0.0, lower=-np.inf, upper=np.inf, method="t"):
"""Get mean loss and estimated error bar.
Parameters
----------
x : ndarray, shape (n_samples,)
Array of independent observations.
confidence : float
Confidence probability (in (0, 1)) to construct error bar.
min_EB : float
Minimum size of resulting error bar regardless of the data in `x`.
lower : float
A priori known theoretical lower limit on unknown mean of `x`. For
instance, for mean zero-one loss, ``lower=0``.
upper : float
A priori known theoretical upper limit on unknown mean of `x`. For
instance, for mean zero-one loss, ``upper=1``.
method : {'t', 'bernstein', 'boot'}
Method to use for building error bar.
Returns
-------
mu : float
Estimated mean of `x`.
EB : float
Size of error bar on mean of `x` (``EB > 0``). The confidence interval
is ``[mu - EB, mu + EB]``.
"""
assert np.all(lower <= x) and np.all(x <= upper)
if method == "t":
EB = t_EB(x, confidence=confidence)
elif method == "bernstein":
EB = bernstein_EB(x, lower, upper, confidence=confidence)
elif method == "boot":
EB = boot_EB(x, confidence=confidence)
else:
assert False
# EB subroutines already validated x for shape and nans
mu = clip_chk(np.mean(x), lower, upper)
EB = clip_EB(mu, EB, lower, upper, min_EB=min_EB)
return mu, EB
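# --- Usage sketch (not part of the original module). For a per-sample zero-one
# loss the theoretical limits are known, so passing them keeps the reported mean
# and error bar inside [0, 1]. The loss vector below is fabricated.
def _demo_get_mean_and_EB(seed=3):
    rng = np.random.RandomState(seed)
    loss = (rng.rand(500) < 0.3).astype(float)  # synthetic zero-one loss
    return get_mean_and_EB(loss, confidence=0.95, lower=0.0, upper=1.0,
                           method="bernstein")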
def get_test(x, lower=-np.inf, upper=np.inf, method="t"):
"""Perform a statistical test to determine if the values in `x` are sampled
from a distribution with a zero mean.
Parameters
----------
x : ndarray, shape (n_samples,)
Array of independent observations.
lower : float
A priori known theoretical lower limit on unknown mean of `x`. For
instance, for mean zero-one loss, ``lower=0``.
upper : float
A priori known theoretical upper limit on unknown mean of `x`. For
instance, for mean zero-one loss, ``upper=1``.
method : {'t', 'bernstein', 'boot'}
Method to use statistical test.
Returns
-------
pval : float
p-value (in [0,1]) from statistical test on `x`.
"""
if method == "t":
pval = t_test(x)
elif method == "bernstein":
pval = bernstein_test(x, lower, upper)
elif method == "boot":
pval = boot_test(x)
else:
assert False
return pval
def get_mean_EB_test(x, confidence=0.95, min_EB=0.0, lower=-np.inf, upper=np.inf, method="t"):
"""Get mean loss and estimated error bar. Also, perform a statistical test
to determine if the values in `x` are sampled from a distribution with a
zero mean.
Parameters
----------
x : ndarray, shape (n_samples,)
Array of independent observations.
confidence : float
Confidence probability (in (0, 1)) to construct error bar.
min_EB : float
Minimum size of resulting error bar regardless of the data in `x`.
lower : float
A priori known theoretical lower limit on unknown mean of `x`. For
instance, for mean zero-one loss, ``lower=0``.
upper : float
A priori known theoretical upper limit on unknown mean of `x`. For
instance, for mean zero-one loss, ``upper=1``.
method : {'t', 'bernstein', 'boot'}
Method to use for building error bar.
Returns
-------
mu : float
Estimated mean of `x`.
EB : float
Size of error bar on mean of `x` (``EB > 0``). The confidence interval
is ``[mu - EB, mu + EB]``.
pval : float
p-value (in [0,1]) from statistical test on `x`.
"""
assert np.all(lower <= x) and np.all(x <= upper)
if method == "t":
EB = t_EB(x, confidence=confidence)
pval = t_test(x)
elif method == "bernstein":
EB = bernstein_EB(x, lower, upper, confidence=confidence)
pval = bernstein_test(x, lower, upper)
elif method == "boot":
EB, pval, _ = _boot_EB_and_test(x, confidence=confidence)
else:
assert False
# EB subroutines already validated x for shape and nans
mu = clip_chk(np.mean(x), lower, upper)
EB = clip_EB(mu, EB, lower, upper, min_EB=min_EB)
return mu, EB, pval
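# --- Usage sketch (not part of the original module). In paired comparisons the
# vector passed in is typically a per-sample loss difference (method minus
# reference), so the returned p-value tests "no difference on average". The
# deltas below are fabricated for illustration.
def _demo_get_mean_EB_test(seed=4):
    rng = np.random.RandomState(seed)
    delta = 0.05 * rng.randn(300) + 0.01  # fabricated per-sample loss deltas
    mu, EB, pval = get_mean_EB_test(delta, confidence=0.95, method="t")
    return mu, EB, pval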
# ============================================================================
# Loss summary: the main purpose of this file.
# ============================================================================
def loss_summary_table(loss_table, ref_method, pairwise_CI=PAIRWISE_DEFAULT, confidence=0.95, method_EB="t", limits={}):
"""Build table with mean and error bar summaries from a loss table that
contains losses on a per data point basis.
Parameters
----------
    loss_table : DataFrame, shape (n_samples, n_metrics * n_methods)
DataFrame with loss of each method according to each loss function on
each data point. The rows are the data points in `y` (that is the index
matches `log_pred_prob_table`). The columns are a hierarchical index
that is the cartesian product of loss x method. That is, the loss of
method foo's prediction of ``y[5]`` according to loss function bar is
        stored in ``loss_table.loc[5, ('bar', 'foo')]``.
ref_method : str
Name of method that is used as reference point in paired statistical
tests. This is usually some some of baseline method. `ref_method` must
be found in the 2nd level of the columns of `loss_tbl`.
pairwise_CI : bool
If True, compute error bars on the mean of ``loss - loss_ref`` instead
of just the mean of `loss`. This typically gives smaller error bars.
confidence : float
Confidence probability (in (0, 1)) to construct error bar.
method_EB : {'t', 'bernstein', 'boot'}
Method to use for building error bar.
limits : dict of str to (float, float)
Dictionary mapping metric name to tuple with (lower, upper) which are
the theoretical limits on the mean loss. For instance, zero-one loss
should be ``(0.0, 1.0)``. If entry missing, (-inf, inf) is used.
Returns
-------
perf_tbl : DataFrame, shape (n_methods, n_metrics * 3)
DataFrame with mean loss of each method according to each loss
function. The rows are the methods. The columns are a hierarchical
index that is the cartesian product of
loss x (mean, error bar, p-value). That is,
``perf_tbl.loc['foo', 'bar']`` is a pandas series with
(mean loss of foo on bar, corresponding error bar, statistical sig)
The statistical significance is a p-value from a two-sided hypothesis
test on the hypothesis H0 that foo has the same mean loss as the
reference method `ref_method`.
"""
assert loss_table.columns.names == (METRIC, METHOD)
metrics, methods = loss_table.columns.levels
assert ref_method in methods # ==> len(methods) >= 1
assert len(loss_table) >= 1 and len(metrics) >= 1
# Could also test these are cartesian product if we wanted to be exhaustive
col_names = | pd.MultiIndex.from_product([metrics, STD_STATS], names=[METRIC, STAT]) | pandas.MultiIndex.from_product |
# Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME> (in alphabetic order)
#
# This file is part of OpenModal.
#
# OpenModal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# OpenModal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenModal. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 20 May 2014
TODO: That mnums at the beginning of every function, that's bad!
TODO: A lot of refactoring.
TODO: Put tables in a dictionary; that way you have a nice overview
of what is inside, which is also much better :)
@author: Matjaz
'''
import time
import os
import itertools
from datetime import datetime
import pandas as pd
from pandas import ExcelWriter
from OpenModal.anim_tools import zyx_euler_to_rotation_matrix
import numpy as np
import pyuff
import OpenModal.utils as ut
# import _transformations as tr
# Uff fields definitions (human-readable).
types = dict()
types[15] = 'Geometry'
types[82] = 'Lines'
types[151] = 'Header'
types[2411] = 'Geometry'
types[164] = 'Units'
types[58] = 'Measurement'
types[55] = 'Analysis'
types[2420] = 'Coor. sys.'
types[18] = 'Coor. sys.'
# Function type definition.
FUNCTION_TYPE = dict()
FUNCTION_TYPE['General'] = 0 # also: unknown
FUNCTION_TYPE['Time Response'] = 1
FUNCTION_TYPE['Auto Spectrum'] = 2
FUNCTION_TYPE['Cross Spectrum'] = 3
FUNCTION_TYPE['Frequency Response Function'] = 4
FUNCTION_TYPE['Transmissibility'] = 5
FUNCTION_TYPE['Coherence'] = 6
FUNCTION_TYPE['Auto Correlation'] = 7
FUNCTION_TYPE['Cross Correlation'] = 8
FUNCTION_TYPE['Power Spectral Density (PSD)'] = 9
FUNCTION_TYPE['Energy Spectral Density (ESD)'] = 10
FUNCTION_TYPE['Probability Density Function'] = 11
FUNCTION_TYPE['Spectrum'] = 12
FUNCTION_TYPE['Cumulative Frequency Distribution'] = 13
FUNCTION_TYPE['Peaks Valley'] = 14
FUNCTION_TYPE['Stress/Cycles'] = 15
FUNCTION_TYPE['Strain/Cycles'] = 16
FUNCTION_TYPE['Orbit'] = 17
FUNCTION_TYPE['Mode Indicator Function'] = 18
FUNCTION_TYPE['Force Pattern'] = 19
FUNCTION_TYPE['Partial Power'] = 20
FUNCTION_TYPE['Partial Coherence'] = 21
FUNCTION_TYPE['Eigenvalue'] = 22
FUNCTION_TYPE['Eigenvector'] = 23
FUNCTION_TYPE['Shock Response Spectrum'] = 24
FUNCTION_TYPE['Finite Impulse Response Filter'] = 25
FUNCTION_TYPE['Multiple Coherence'] = 26
FUNCTION_TYPE['Order Function'] = 27
FUNCTION_TYPE['Phase Compensation'] = 28
# Specific data type for abscisa/ordinate
SPECIFIC_DATA_TYPE = dict()
SPECIFIC_DATA_TYPE['unknown'] = 0
SPECIFIC_DATA_TYPE['general'] = 1
SPECIFIC_DATA_TYPE['stress'] = 2
SPECIFIC_DATA_TYPE['strain'] = 3
SPECIFIC_DATA_TYPE['temperature'] = 5
SPECIFIC_DATA_TYPE['heat flux'] = 6
SPECIFIC_DATA_TYPE['displacement'] = 8
SPECIFIC_DATA_TYPE['reaction force'] = 9
SPECIFIC_DATA_TYPE['velocity'] = 11
SPECIFIC_DATA_TYPE['acceleration'] = 12
SPECIFIC_DATA_TYPE['excitation force'] = 13
SPECIFIC_DATA_TYPE['pressure'] = 15
SPECIFIC_DATA_TYPE['mass'] = 16
SPECIFIC_DATA_TYPE['time'] = 17
SPECIFIC_DATA_TYPE['frequency'] = 18
SPECIFIC_DATA_TYPE['rpm'] = 19
SPECIFIC_DATA_TYPE['order'] = 20
SPECIFIC_DATA_TYPE['sound pressure'] = 21
SPECIFIC_DATA_TYPE['sound intensity'] = 22
SPECIFIC_DATA_TYPE['sound power'] = 23
# TODO: Fast get and set. Check setting with enlargement.
class ModalData(object):
"""The data object holds all measurement, results and geometry data
"""
def __init__(self):
"""
Constructor
"""
self.create_empty()
def create_empty(self):
"""Create an empty data container."""
# Tables
self.tables = dict()
# Holds the tables, populated by importing a uff file.
# TODO: This is temporary? Maybe, maybe not, might be
# a good idea to have some reference of imported data!
self.uff_import_tables = dict()
self.create_info_table()
self.create_geometry_table()
self.create_measurement_table()
self.create_analysis_table()
self.create_lines_table()
self.create_elements_table()
# Set model id
self.model_id = 0
def create_info_table(self):
"""Creates an empty info table."""
self.tables['info'] = pd.DataFrame(columns=['model_id', 'model_name', 'description', 'units_code', 'length',
'force', 'temp', 'temp_offset'])
# self.tables['info'] = pd.DataFrame(columns=['model_id', 'uffid', 'value'])
def create_geometry_table(self):
"""Creates an empty geometry table."""
self.tables['geometry'] = pd.DataFrame(columns=['model_id', 'uffid', 'node_nums',
'x', 'y', 'z', 'thx', 'thy', 'thz',
'disp_cs', 'def_cs',
'color','clr_r','clr_g','clr_b','clr_a',
'r','phi','cyl_thz'])
def create_measurement_table(self):
"""Creates an empty measurement table."""
self.tables['measurement_index'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'uffid', 'field_type', 'excitation_type',
'func_type', 'rsp_node', 'rsp_dir', 'ref_node',
'ref_dir', 'abscissa_spec_data_type',
'ordinate_spec_data_type', 'orddenom_spec_data_type', 'zero_padding'], dtype=int)
self.tables['measurement_values'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'frq', 'amp'])
self.tables['measurement_values'].amp = self.tables['measurement_values'].amp.astype('complex')
self.tables['measurement_values_td'] = pd.DataFrame(columns=['model_id', 'measurement_id', 'n_avg', 'x_axis',
'excitation', 'response'])
def create_analysis_table(self):
"""Creates an empty analysis table."""
        self.tables['analysis_index'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method', 'uffid',
                                                               'field_type', 'analysis_type', 'data_ch',
                                                               'spec_data_type', 'load_case', 'mode_n', 'eig', 'freq',
                                                               'freq_step_n', 'node_nums', 'rsp_node', 'rsp_dir',
                                                               'ref_node', 'ref_dir', 'data_type',
                                                               'eig_real', 'eig_xi', 'spots'])
self.tables['analysis_values'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method', 'mode_n',
'node_nums', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6'])
self.tables['analysis_settings'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method',
'f_min','f_max', 'nmax', 'err_fn', 'err_xi', ])
self.tables['analysis_stabilisation'] = pd.DataFrame(columns=['model_id', 'analysis_id', 'analysis_method',
'pos', 'size', 'pen_color', 'pen_width',
'symbol', 'brush', 'damp'])
self.tables['analysis_index'].eig = self.tables['analysis_index'].eig.astype('complex')
self.tables['analysis_values'].r1 = self.tables['analysis_values'].r1.astype('complex')
self.tables['analysis_values'].r2 = self.tables['analysis_values'].r2.astype('complex')
self.tables['analysis_values'].r3 = self.tables['analysis_values'].r3.astype('complex')
self.tables['analysis_values'].r4 = self.tables['analysis_values'].r4.astype('complex')
self.tables['analysis_values'].r5 = self.tables['analysis_values'].r5.astype('complex')
self.tables['analysis_values'].r6 = self.tables['analysis_values'].r6.astype('complex')
def create_lines_table(self):
"""Creates an empty lines table."""
        self.tables['lines'] = pd.DataFrame(columns=['model_id', 'uffid', 'id', 'field_type', 'trace_num',
                                                     'color', 'n_nodes', 'trace_id', 'pos', 'node'])
def create_elements_table(self):
"""Creates an empty elements table."""
# TODO: Missing 'physical property table number' and 'material property ...'
# TODO: Missing 'fe descriptor id', chosen from a list of 232(!) types!!?
# TODO: Missing beam support.
self.tables['elements_index'] = pd.DataFrame(columns=['model_id', 'element_id', 'element_descriptor', 'color',
'nr_of_nodes','clr_r','clr_g','clr_b','clr_a'])
self.tables['elements_values'] = pd.DataFrame(columns=['model_id', 'element_id', 'node_id', 'node_pos'])
def new_model(self, model_id=-1, entries=dict()):
"""Set new model id. Values can be set through entries dictionary, for each
value left unset, default will be used."""
if model_id == -1:
# Create a new model_id. First check if table is empty.
current_models = self.tables['info'].model_id
if current_models.size == 0:
model_id = 0
else:
model_id = current_models.max() + 1
fields = {'db_app': 'ModalData', 'time_db_created': time.strftime("%d-%b-%y %H:%M:%S"),
'time_db_saved': time.strftime("%d-%b-%y %H:%M:%S"), 'program': 'OpenModal',
'model_name': 'DefaultName', 'description': 'DefaultDecription', 'units_code': 9,
'temp': 1, 'temp_mode': 1, 'temp_offset': 1, 'length': 1, 'force': 1,
'units_description': 'User unit system'}
for key in entries:
fields[key] = entries[key]
# TODO: Check if model_id already exists.
input = [model_id, fields['model_name'], fields['description'], fields['units_code'], fields['length'],
fields['force'], fields['temp'], fields['temp_offset']]
new_model = pd.DataFrame([input], columns=['model_id', 'model_name', 'description', 'units_code', 'length',
'force', 'temp', 'temp_offset'])
self.tables['info'] = pd.concat([self.tables['info'], new_model], ignore_index=True)
return model_id
def new_measurement(self, model_id, excitation_type, frequency, h, reference=[0, 0], response=[0, 0],
function_type='Frequency Response Function', abscissa='frequency', ordinate='acceleration',
denominator='excitation force', zero_padding=0, td_x_axis=np.array([]),
td_excitation=None, td_response=None):
"""Add a new measurement."""
# Check if model id exists.
if self.tables['info'].model_id.size == 0:
raise ValueError
elif not any(self.tables['info'].model_id == model_id):
raise ValueError
# Prepare a new measurement_id.
if self.tables['measurement_index'].measurement_id.size == 0:
measurement_id = 0
else:
measurement_id = self.tables['measurement_index'].measurement_id.max() + 1
newentry_idx = pd.DataFrame([[model_id, measurement_id, excitation_type, FUNCTION_TYPE[function_type], response[0],
response[1], reference[0], reference[1], SPECIFIC_DATA_TYPE[abscissa],
SPECIFIC_DATA_TYPE[ordinate], SPECIFIC_DATA_TYPE[denominator], zero_padding]],
columns=['model_id', 'measurement_id', 'excitation_type', 'func_type', 'rsp_node', 'rsp_dir',
'ref_node', 'ref_dir', 'abscissa_spec_data_type',
'ordinate_spec_data_type', 'orddenom_spec_data_type', 'zero_padding'])
self.tables['measurement_index'] = pd.concat([ self.tables['measurement_index'],
newentry_idx], ignore_index=True)
# Add entry with measured frf.
newentry_val = pd.DataFrame(columns=['model_id', 'measurement_id', 'frq', 'amp'])
newentry_val['frq'] = frequency
newentry_val['amp'] = h
newentry_val['model_id'] = model_id
newentry_val['measurement_id'] = measurement_id
self.tables['measurement_values'] = pd.concat([self.tables['measurement_values'],
newentry_val], ignore_index=True)
# if td_x_axis.size > 0:
# # TODO: Create it with size you already know. Should be faster?
# newentry_val_td = pd.DataFrame(columns=['model_id', 'measurement_id', 'x_axis', 'excitation', 'response'])
# newentry_val_td['x_axis'] = td_x_axis
# newentry_val_td['excitation'] = td_excitation
# newentry_val_td['response'] = td_response
# newentry_val_td['model_id'] = model_id
# newentry_val_td['measurement_id'] = measurement_id
#
# self.tables['measurement_values_td'] = pd.concat([self.tables['measurement_values_td'], newentry_val_td],
# ignore_index=True)
if td_x_axis.size > 0:
n_averages = len(td_response)
i = 0
# TODO: Optimize here.
for td_excitation_i, td_response_i in zip(td_excitation, td_response):
# TODO: Create it with size you already know. Should be faster?
newentry_val_td = pd.DataFrame(columns=['model_id', 'measurement_id', 'n_avg',
'x_axis', 'excitation', 'response'])
newentry_val_td['x_axis'] = td_x_axis
newentry_val_td['excitation'] = td_excitation_i
newentry_val_td['response'] = td_response_i
newentry_val_td['model_id'] = model_id
newentry_val_td['measurement_id'] = measurement_id
newentry_val_td['n_avg'] = i
i += 1
self.tables['measurement_values_td'] = pd.concat([self.tables['measurement_values_td'], newentry_val_td],
ignore_index=True)
def remove_model(self, model_id):
"""Remove all data connected to the supplied model id."""
try:
el_idx = self.tables['elements_index']
el_vals = self.tables['elements_values']
elements_id = el_idx[el_idx.model_id == model_id].element_id
self.tables['elements_values'] = self.tables['elements_values'][~el_vals.element_id.isin(elements_id)]
self.tables['elements_index'] = self.tables['elements_index'][el_idx.model_id != model_id]
except AttributeError:
print('There is no element data to delete.')
try:
lines = self.tables['lines']
self.tables['lines'] = self.tables['lines'][lines.model_id != model_id]
except AttributeError:
print('There is no line data to delete.')
try:
an_idx = self.tables['analysis_index']
an_vals = self.tables['analysis_values']
analysis_id = an_idx[an_idx.model_id == model_id].analysis_id
self.tables['analysis_values'] = self.tables['analysis_values'][~an_vals.element_id.isin(analysis_id)]
self.tables['analysis_index'] = self.tables['analysis_index'][an_idx.model_id != model_id]
except AttributeError:
print('There is no analysis data to delete.')
try:
me_idx = self.tables['measurement_index']
me_vals = self.tables['measurement_values']
me_vals_td = self.tables['measurement_values_td']
measurement_id = me_idx[me_idx.model_id == model_id].measurement_id
self.tables['measurement_values_td'] = self.tables['measurement_values_td'][~me_vals_td.measurement_id.isin(measurement_id)]
self.tables['measurement_values'] = self.tables['measurement_values'][~me_vals.measurement_id.isin(measurement_id)]
self.tables['measurement_index'] = self.tables['measurement_index'][me_idx.model_id != model_id]
except AttributeError:
print('There is no measurement data to delete.')
try:
geometry = self.tables['geometry']
self.tables['geometry'] = self.tables['geometry'][geometry.model_id != model_id]
except AttributeError:
print('There is no geometry data to delete.')
try:
info = self.tables['info']
self.tables['info'] = self.tables['info'][info.model_id != model_id]
except AttributeError:
print('There is no info data to delete.')
def import_uff(self, fname):
"""Pull data from uff."""
# Make sure you start with new model ids at the appropriate index.
if self.tables['info'].model_id.size > 0:
base_key = self.tables['info'].model_id.max() + 1
else:
base_key=0
uffdata = ModalDataUff(fname, base_key=base_key)
for key in self.tables.keys():
if key in uffdata.tables:
# uffdata.tables[key].model_id += 100
self.tables[key] = pd.concat([self.tables[key], uffdata.tables[key]], ignore_index=True)
self.uff_import_tables[key] = ''
self.file_structure = uffdata.file_structure
def export_to_uff(self, fname, model_ids=[], data_types=[], separate_files_flag=False):
"""Export data to uff."""
        if len(model_ids) == 0:
            model_ids = self.tables['info'].model_id.unique()
if len(data_types) == 0:
data_types = ['nodes', 'lines', 'elements', 'measurements', 'analyses']
if len(model_ids) == 0:
print('Warning: Empty tables. (No model_ids found).')
return False
t = datetime.now()
folder_timestamp = 'OpenModal Export UFF -- {:%Y %d-%m %H-%M-%S}'.format(t)
export_folder = os.path.join(fname, folder_timestamp)
try:
os.mkdir(export_folder)
except:
print('Warning: File exists. Try again later ...')
return False
for model_id in model_ids:
# -- Write info.
dfi = self.tables['info']
dfi = dfi[dfi.model_id == model_id]
# TODO: Do not overwrite this dfi
model_name = dfi.model_name.values[0]
if not separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}.uff'.format(model_name, model_id)))
if len(dfi) != 0:
dset_info = {'db_app': 'modaldata v1',
'model_name': dfi.model_name.values[0],
'description': dfi.description.values[0],
'program': 'Open Modal'}
dset_units = {'units_code': dfi.units_code.values[0],
# TODO: Maybe implement other data.
# 'units_description': dfi.units_description,
# 'temp_mode': dfi.temp_mode,
'length': dfi.length.values[0],
'force': dfi.force.values[0],
'temp': dfi.temp.values[0],
'temp_offset': dfi.temp_offset.values[0]}
# for key in dset_info.keys():
# dset_info[key] = dset_info[key].value.values[0]
dset_info['type'] = 151
# for key in dset_units.keys():
# dset_units[key] = dset_units[key].value.values[0]
dset_units['type'] = 164
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_info.uff'.format(model_name, model_id)))
uffwrite._write_set(dset_info, mode='add')
uffwrite._write_set(dset_units, mode='add')
# -- Write Geometry.
if 'nodes' in data_types:
dfg = self.tables['geometry']
#dfg = dfg[dfg.model_id==model_id]
#drop nan lines defined in geometry
model_id_mask=dfg.model_id==model_id
nan_mask = dfg[['node_nums','x', 'y', 'z','thz', 'thy', 'thx' , 'model_id']].notnull().all(axis=1)
comb_mask = model_id_mask & nan_mask
dfg = dfg[comb_mask]
if len(dfg) != 0:
# .. First the coordinate systems. Mind the order of angles (ZYX)
size = len(dfg)
local_cs = np.zeros((size * 4, 3), dtype=float)
th_angles = dfg[['thz', 'thy', 'thx']].values
for i in range(size):
#local_cs[i*4:i*4+3, :] = ut.zyx_euler_to_rotation_matrix(th_angles[i, :])
local_cs[i*4:i*4+3, :] = zyx_euler_to_rotation_matrix(th_angles[i, :]*np.pi/180.)
local_cs[i*4+3, :] = 0.0
dset_cs = {'local_cs': local_cs, 'nodes': dfg[['node_nums']].values, 'type': 2420}
uffwrite._write_set(dset_cs, mode='add')
# .. Then points.
dset_geometry = {'grid_global': dfg[['node_nums', 'x', 'y', 'z']].values,
'export_cs_number': 0,
'cs_color': 8,
'type': 2411}
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_nodes.uff'.format(model_name, model_id)))
uffwrite._write_set(dset_geometry, mode='add')
# -- Write Measurements.
if 'measurements' in data_types:
dfi = self.tables['measurement_index']
dfi = dfi[dfi.model_id == model_id]
dfi.field_type = 58
if len(dfi) != 0:
dfv = self.tables['measurement_values']
dfv = dfv[dfv.model_id == model_id]
for id, measurement in dfi.iterrows():
data = dfv[dfv.measurement_id == measurement.measurement_id]
dsets={'type': measurement['field_type'],
'func_type': measurement['func_type'],
'data': data['amp'].values.astype('complex'),
'x': data['frq'].values,
'rsp_node': measurement['rsp_node'],
'rsp_dir': measurement['rsp_dir'],
'ref_node': measurement['ref_node'],
'ref_dir': measurement['ref_dir'],
'rsp_ent_name':model_name, 'ref_ent_name':model_name}
# TODO: Make rsp_ent_name and ref_ent_name fields in measurement_index table.
if pd.isnull(measurement['abscissa_spec_data_type']):
dsets['abscissa_spec_data_type'] = 0
else:
dsets['abscissa_spec_data_type'] = measurement['abscissa_spec_data_type']
if pd.isnull(measurement['ordinate_spec_data_type']):
dsets['ordinate_spec_data_type'] = 0
else:
dsets['ordinate_spec_data_type'] = measurement['ordinate_spec_data_type']
if pd.isnull(measurement['orddenom_spec_data_type']):
dsets['orddenom_spec_data_type'] = 0
else:
dsets['orddenom_spec_data_type'] = measurement['orddenom_spec_data_type']
if separate_files_flag:
uffwrite=pyuff.UFF(os.path.join(export_folder, '{0}_{1:.0f}_measurements.uff'.format(model_name, model_id)))
uffwrite._write_set(dsets, mode='add')
def export_to_csv(self, fname, model_ids=[], data_types=[]):
"""Export data to uff."""
if len(model_ids) == 0:
model_ids = self.tables['info'].model_id.unique()
if len(data_types) == 0:
data_types = ['nodes', 'lines', 'elements', 'measurements', 'analyses']
if len(model_ids) == 0:
print('Warning: Empty tables. (No model_ids found).')
return False
t = datetime.now()
folder_timestamp = 'OpenModal Export CSV -- {:%Y %d-%m %H-%M-%S}'.format(t)
export_folder = os.path.join(fname, folder_timestamp)
try:
os.mkdir(export_folder)
except:
print('Warning: File exists. Try again later ...')
return False
for model_id in model_ids:
# -- Write info.
dfi = self.tables['info']
dfi = dfi[dfi.model_id == model_id]
model_name = '{0}_{1:.0f}'.format(dfi.model_name.values[0], model_id)
model_dir = os.path.join(export_folder, model_name)
os.mkdir(model_dir)
df_ = self.tables['info']
df_[df_.model_id == model_id].to_csv(os.path.join(model_dir, 'info.csv'))
if 'nodes' in data_types:
df_ = self.tables['geometry']
df_[df_.model_id == model_id].to_csv(os.path.join(model_dir, 'geometry.csv'))
# -- Special treatment for measurements
if 'measurements' in data_types:
measurements_dir = os.path.join(model_dir, 'measurements')
os.mkdir(measurements_dir)
df_ = self.tables['measurement_index']
df_[df_.model_id == model_id].to_csv(os.path.join(measurements_dir, 'measurements_index.csv'))
df_ = self.tables['measurement_values']
grouped_measurements = df_[df_.model_id == model_id].groupby('measurement_id')
for id, measurement in grouped_measurements:
measurement['amp_real'] = measurement.amp.real
measurement['amp_imag'] = measurement.amp.imag
measurement[['frq', 'amp_real', 'amp_imag']].to_csv(os.path.join(measurements_dir,
'measurement_{0:.0f}.csv'.format(id)),
index=False)
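# --- Hedged usage sketch (not part of the original OpenModal source) ---
# Illustrates how the CSV export above might be driven for a loaded
# ModalData-style object; the variable names are placeholders only.
def _example_export_all_to_csv(modal_data, out_dir='exports'):
    ok = modal_data.export_to_csv(out_dir,
                                  model_ids=[],                    # empty -> all models
                                  data_types=['nodes', 'measurements'])
    if ok is False:                                                # export_to_csv returns False on failure
        print('Nothing exported: empty tables or the target folder already exists.')
    return ok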
class ModalDataUff(object):
'''
Reads the uff file and populates the following pandas tables:
-- ModalData.measurement_index : index of all measurements from field 58
-- ModalData.geometry : index of all points with CS from fields 2411 and 15
-- ModalData.info : info about measurements
Based on the position of field in the uff file, uffid is assigned to each field in the following
manner: first field, uffid = 0; second field, uffid = 1; and so on. Columns are named based on keys
from the UFF class if possible. Fields uffid and field_type (type of field, eg. 58) are added.
Geometry table combines nodes and their respective CSs, column names are altered.
'''
def __init__(self, fname='../../unvread/data/shield.uff', base_key=0):
'''
Constructor
'''
self.uff_object = pyuff.UFF(fname)
# Start above base_key.
self.base_key = base_key
self.uff_types = self.uff_object.get_set_types()
# print(self.uff_types)
# Models
self.models = dict()
# Tables
self.tables = dict()
# Coordinate-system tables
self.localcs = pd.DataFrame(columns=['model_id', 'uffidcs', 'node_nums', 'x1', 'x2', 'x3',
'y1', 'y2', 'y3',
'z1', 'z2', 'z3'])
self.localeul = | pd.DataFrame(columns=['model_id', 'uffidcs', 'node_nums', 'thx', 'thy', 'thz']) | pandas.DataFrame |
'''
Simple vanilla LSTM multiclass classifier for raw EEG data
'''
import scipy.io as spio
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import gc
import h5py
def loadmat(filename):
def _check_keys(d):
'''
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
'''
for key in d:
if isinstance(d[key], spio.matlab.mio5_params.mat_struct):
d[key] = _todict(d[key])
return d
def _has_struct(elem):
"""Determine if elem is an array and if any array item is a struct"""
return isinstance(elem, np.ndarray) and any(isinstance(
e, spio.matlab.mio5_params.mat_struct) for e in elem)
def _todict(matobj):
'''
A recursive function which constructs from matobjects nested dictionaries
'''
d = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, spio.matlab.mio5_params.mat_struct):
d[strg] = _todict(elem)
elif _has_struct(elem):
d[strg] = _tolist(elem)
else:
d[strg] = elem
return d
def _tolist(ndarray):
'''
A recursive function which constructs lists from cellarrays
(which are loaded as numpy ndarrays), recursing into the elements
if they contain matobjects.
'''
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, spio.matlab.mio5_params.mat_struct):
elem_list.append(_todict(sub_elem))
elif _has_struct(sub_elem):
elem_list.append(_tolist(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
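# Hedged usage sketch (not in the original script): loading one KARA ONE
# EEG_data.mat file with loadmat() and inspecting the converted nested dict.
# The path and the top-level key layout follow the usage in main() below.
def _example_loadmat(path='EEG_data.mat'):
    data = loadmat(path)
    eeg = data['EEG_Data']           # mat_struct converted to a plain dict
    return list(eeg.keys())          # e.g. ['activeEEG', 'prompts', ...]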
"""Helper function to truncate dataframes to a specified shape - usefull to reduce all EEG trials to the same number
of time stamps.
"""
def truncate(arr, shape):
desired_size_factor = np.prod([n for n in shape if n != -1])
if -1 in shape: # implicit array size
desired_size = arr.size // desired_size_factor * desired_size_factor
else:
desired_size = desired_size_factor
return arr.flat[:desired_size].reshape(shape)
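# Hedged example (not part of the original script): truncating one EEG trial
# to a fixed (channels, samples) shape; the sizes are illustrative only.
def _example_truncate():
    trial = np.random.randn(62, 5001)       # 62 channels, 5001 time stamps
    fixed = truncate(trial, (62, 5000))     # drop the trailing samples
    return fixed.shape                      # -> (62, 5000)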
def main():
PATH = "G:\\UWA_MDS\\2021SEM1\\Research_Project\\KARA_ONE_Data\\ImaginedSpeechData\\"
subjects = ['MM05', 'MM08', 'MM09', 'MM10', 'MM11', 'MM12', 'MM14', 'MM15', 'MM16', 'MM18', 'MM19', 'MM20', 'MM21', 'P02']
for subject in subjects:
print("Working on Subject: " + subject)
print("Loading .set data")
""" Load EEG data with loadmat() function"""
SubjectData = loadmat(PATH + subject + '\\EEG_data.mat')
print("Setting up dataframes")
""" Setup target and EEG dataframes"""
targets = pd.DataFrame(SubjectData['EEG_Data']['prompts'])
targets.columns = ['prompt']
sequences = | pd.DataFrame(SubjectData['EEG_Data']['activeEEG']) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
from evidently import ColumnMapping
from evidently.analyzers.data_quality_analyzer import DataQualityAnalyzer
from evidently.analyzers.data_quality_analyzer import FeatureQualityStats
from evidently.analyzers.utils import process_columns
import pytest
@pytest.mark.parametrize(
"dataset, expected_metrics",
[
(
pd.DataFrame({"numerical_feature": []}),
FeatureQualityStats(
feature_type="num",
count=0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
infinite_count=None,
infinite_percentage=None,
max=None,
min=None,
mean=None,
missing_count=None,
missing_percentage=None,
most_common_value=None,
most_common_value_percentage=None,
std=None,
unique_count=None,
unique_percentage=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"numerical_feature": [np.nan, np.nan, np.nan, np.nan]}),
FeatureQualityStats(
feature_type="num",
count=0,
percentile_25=np.nan,
percentile_50=np.nan,
percentile_75=np.nan,
infinite_count=0,
infinite_percentage=0,
max=np.nan,
min=np.nan,
mean=np.nan,
missing_count=4,
missing_percentage=100,
most_common_value=np.nan,
most_common_value_percentage=100,
std=np.nan,
unique_count=0,
unique_percentage=0,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"numerical_feature": [np.nan, 2, 2, 432]}),
FeatureQualityStats(
feature_type="num",
count=3,
infinite_count=0,
infinite_percentage=0.0,
missing_count=1,
missing_percentage=25,
unique_count=2,
unique_percentage=50,
percentile_25=2.0,
percentile_50=2.0,
percentile_75=217.0,
max=432.0,
min=2.0,
mean=145.33,
most_common_value=2,
most_common_value_percentage=50,
std=248.26,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
],
)
def test_data_profile_analyzer_num_features(dataset: pd.DataFrame, expected_metrics: FeatureQualityStats) -> None:
data_profile_analyzer = DataQualityAnalyzer()
data_mapping = ColumnMapping(
numerical_features=["numerical_feature"],
)
result = data_profile_analyzer.calculate(dataset, None, data_mapping)
assert result.reference_features_stats is not None
assert result.reference_features_stats.num_features_stats is not None
assert "numerical_feature" in result.reference_features_stats.num_features_stats
metrics = result.reference_features_stats.num_features_stats["numerical_feature"]
assert metrics == expected_metrics
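# Hedged sketch (not part of the original test module): the same analyzer can
# also be run with both a reference and a current frame. Whether this analyzer
# version fills the current-dataset stats identically is an assumption here.
def _example_two_dataset_run():
    reference = pd.DataFrame({"numerical_feature": [1.0, 2.0, 3.0]})
    current = pd.DataFrame({"numerical_feature": [2.0, 3.0, np.nan]})
    mapping = ColumnMapping(numerical_features=["numerical_feature"])
    return DataQualityAnalyzer().calculate(reference, current, mapping)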
@pytest.mark.parametrize(
"dataset, expected_metrics",
[
(
| pd.DataFrame({"category_feature": []}) | pandas.DataFrame |
import pickle
from pathlib import Path
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import SelectField, SubmitField, SelectMultipleField
from wtforms.widgets import CheckboxInput, ListWidget
from wtforms.validators import DataRequired
import pandas as pd
# https://gist.github.com/juzten/2c7850462210bfa540e3
class MultiCheckboxField(SelectMultipleField):
widget = ListWidget(prefix_label=False)
option_widget = CheckboxInput()
_FACTIONS = [
"alchemists",
"auren",
"chaosmagicians",
"cultists",
"darklings",
"dwarves",
"engineers",
"fakirs",
"giants",
"halflings",
"mermaids",
"nomads",
"swarmlings",
"witches",
]
_SCORE_NAMES_DICT = {
"SCORE1": "1 EARTH -> 1 C | SPADE >> 2",
"SCORE2": "4 EARTH -> 1 SPADE | TOWN >> 5",
"SCORE3": "4 WATER -> 1 P | D >> 2",
"SCORE4": "2 FIRE -> 1 W | SA/SH >> 5",
"SCORE5": "4 FIRE -> 4 PW | D >> 2",
"SCORE6": "4 WATER -> 1 SPADE | TP >> 3",
"SCORE7": "2 AIR -> 1 W | SA/SH >> 5",
"SCORE8": "4 AIR -> 1 SPADE | TP >> 3",
"SCORE9": "1 CULT_P -> 2 C | TE >> 4",
}
_SCORES = [(key, f"{key} - {value}") for key, value in _SCORE_NAMES_DICT.items()]
_BONUSES = [f"BON{i}" for i in range(1, 11)]
def load_model():
"""Deserialize the predictive model"""
model_path = Path(__file__).parent / "pickles" / "model.pkl"
with open(model_path, "rb") as model_pickle:
model = pickle.load(model_pickle)
return model
def load_input_series():
"""Deserialize the input series"""
series_path = Path(__file__).parent / "pickles" / "input_series.pkl"
with open(series_path, "rb") as series_pickle:
series = pickle.load(series_pickle)
return series
_MODEL = load_model()
_INPUT_SERIES = load_input_series()
def generate_input_series(score_seq, bonuses, faction):
"""Take inputs in the style they'll be received from the webform and turn them into
a series suitable for producing predictions
Parameters
----------
score_seq: [str]
Sequence of scoring tiles. Should be something like ["SCORE7", "SCORE1"...]
bonuses: [str]
The list of bonus tiles for the game, should be something like
["BON1", "BON10"...]
faction: str
The faction to score for this particular scenario
"""
input_series = _INPUT_SERIES.copy()
# Player number doesn't really matter, just put something
input_series.loc["player_num"] = 2.5
# Identify faction
faction_index = f"faction_{faction}"
if faction_index in input_series.index:
input_series.loc[faction_index] = 1
for num, score in enumerate(score_seq, 1):
index = f"faction_{faction}_x_score_turn_{num}_{score}"
if index in input_series.index:
input_series.loc[index] = 1
# Populate the bonus interaction rows
for bonus in bonuses:
index = f"faction_{faction}_x_{bonus}"
if index in input_series.index:
input_series.loc[index] = 1
# Get the input in the right shape for prediction
predict_in = input_series.to_frame().T
return predict_in
def generate_prediction(input_series):
"""Estimate victory point margin for a given scenario and faction"""
return _MODEL.predict(input_series)[0]
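# Hedged usage sketch (not part of the original app): scoring one faction for
# a single scenario. The tile codes are illustrative but drawn from the
# _SCORES/_BONUSES constants above.
def _example_single_prediction():
    score_seq = ['SCORE1', 'SCORE2', 'SCORE3', 'SCORE4', 'SCORE5', 'SCORE6']
    bonuses = ['BON1', 'BON2', 'BON3']
    features = generate_input_series(score_seq, bonuses, 'witches')
    return generate_prediction(features)     # estimated victory-point margin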
def generate_scenario_df(score1, score2, score3, score4, score5, score6, bonuses):
"""Take inputs in the style they'll be received from the webform and turn them into
a table of predictions.
Parameters
----------
score{n}: str
Score tile for the nth turn. Should be something like "SCORE7"
bonuses: [str]
The list of bonus tiles for the game, should be something like
["BON1", "BON10"...]
Predictions are produced for every faction in _FACTIONS, so no faction
argument is taken.
"""
score_seq = [score1, score2, score3, score4, score5, score6]
output_series = | pd.Series(index=_FACTIONS, data=0) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================================= #
# DS_generator.py #
# Author: <NAME> #
# Creation Date: 03/10/2020 #
# ============================================================================================= #
"""
Imports original DECAGON database and translates it into adjacency matrices and enumeration
dictionaries. First the original dataset is filtered so it has no unlinked nodes creating a
consistent network. Then a fraction of the dataset is chosen, selecting a fixed number of
polypharmacy side effects given by parameter N (defaults to 964). With the reduced network,
the adjacency matrices and the node enumeration dictionaries are created and exported as a
pickle python3 readable file.
Parameters
----------
number of side effects : int, default=964
Number of joint drug side effects to be chosen from the complete dataset. If not given,
the program uses the maximum number of side effects used by the authors of DECAGON.
"""
# ============================================================================================= #
import argparse
import numpy as np
import pandas as pd
import scipy.sparse as sp
import pickle
from joblib import Parallel, delayed
parser = argparse.ArgumentParser(description='Remove outliers from datasets')
parser.add_argument('N', nargs='?',default =964,type=int, help="Number of side effects")
args = parser.parse_args()
N = args.N
# Import databases as pandas dataframes
PPI = | pd.read_csv('original_data/bio-decagon-ppi.csv',sep=',') | pandas.read_csv |
import pandas as pd
import numpy as np
import random
import networkx as nx
import math
import time, math
import json
import glob
import os
import pickle
from datetime import datetime, timedelta, date
from collections import Counter
import networkx as nx
"""Helper Functions"""
def convert_datetime(dataset, verbose):
"""
Convert the 'nodeTime' column of the dataset to pandas datetime objects,
trying epoch seconds first, then epoch milliseconds, then free-form parsing.
"""
if verbose:
print('Converting strings to datetime objects...', end='', flush=True)
try:
dataset['nodeTime'] = pd.to_datetime(dataset['nodeTime'], unit='s')
except:
try:
dataset['nodeTime'] = pd.to_datetime(dataset['nodeTime'], unit='ms')
except:
dataset['nodeTime'] = | pd.to_datetime(dataset['nodeTime']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import unittest
import pandas as pd
import pandas.testing as tm
import numpy as np
from pandas_xyz import algorithms as algs
class TestAlgorithms(unittest.TestCase):
def test_displacement(self):
"""Test out my distance algorithm with hand calcs."""
lon = | pd.Series([0.0, 0.0, 0.0]) | pandas.Series |
import torch
import pandas as pd
import numpy as np
import time
import traceback
import torch.utils.data
from pathlib import Path
import os,sys
import cv2
import yaml
from imutils.paths import list_images
from tqdm import tqdm
import argparse
import albumentations as A
try:
import pretty_errors
pretty_errors.configure(display_locals = True)
except ImportError:
pass
import warnings
#warnings.filterwarnings("ignore",)
from rich import print,console
from rich.progress import track
console = console.Console()
from model import build_model
from dataset import create_transform
from importlib import machinery
_cur=Path(__file__).absolute().parent
machinery.SourceFileLoader('general',str(_cur/'../libs/general.py')).load_module()
machinery.SourceFileLoader('m_utils',str(_cur/'../libs/model_utils.py')).load_module()
from general import clock,create_log,seed_everything
from m_utils import get_labels,Model_Saver,get_features
def parse_args():
parser = argparse.ArgumentParser(description='Test model')
parser.add_argument('--gpu', dest='gpu',help='gpu ID', type=str)
parser.add_argument('--shape', dest='shape',help='img resize',nargs='+',type=int )
parser.add_argument('--mname', dest='mname',help='model name',type=str)
parser.add_argument('--mpath', dest='mpath',help='model path', type=str)
parser.add_argument('--tta', dest='tta',help='tta trigger', type=float)
parser.add_argument('--test',dest='test_path',type=str)
parser.add_argument('--yaml',dest='yaml_path',type=str)
parser.add_argument('--calculate',dest='c',help='calculate score or not',type=int)
parser.add_argument('--thre', dest='thre',help='threshold',type=float)
parser.add_argument('--if_feature', dest='if_feature',type=int)
args = parser.parse_args()
return args
def test(args, model):
files=list(list_images(args.test_path))
trfm_train,trfm_val=create_transform(args)
df= | pd.DataFrame(columns=('filename','pred','score')) | pandas.DataFrame |
import os
import urllib
import json
import time
import arrow
import numpy as np
import pandas as pd
from pymongo import MongoClient, UpdateOne
MONGO_URI = os.environ.get('MONGO_URI')
DARKSKY_KEY = os.environ.get('DARKSKY_KEY')
FARM_LIST = ['BLUFF1', 'CATHROCK', 'CLEMGPWF', 'HALLWF2', 'HDWF2',
'LKBONNY2', 'MTMILLAR', 'NBHWF1', 'SNOWNTH1', 'SNOWSTH1',
'STARHLWF', 'WATERLWF', 'WPWF']
FARM_NAME_LIST = ['Bluff Wind Farm', 'Cathedral Rocks Wind Farm',
'Clements Gap Wind Farm','Hallett 2 Wind Farm',
'Hornsdale Wind Farm 2', 'Lake Bonney Stage 2 Windfarm',
'Mt Millar Wind Farm', 'North Brown Hill Wind Farm',
'Snowtown Wind Farm Stage 2 North',
'Snowtown South Wind Farm', 'Starfish Hill Wind Farm',
'Waterloo Wind Farm', 'Wattle Point Wind Farm']
def connect_db(MONGO_URI):
"""Connect to MongoDB & return the client object."""
return MongoClient(MONGO_URI)
def fetch_data(client, farm, limit):
"""Get the last N row of data."""
time_start = time.time()
db = client['wpp']
print(f'Fetching data for {farm}...', end='', flush=True)
col = db[farm]
if limit == None:
df = pd.DataFrame(col.find({}, batch_size=10000).sort('_id', -1))
else:
df = pd.DataFrame(
col.find({}, batch_size=1000).sort('_id', -1).limit(limit))
if '_id' in df.columns:
df = df.rename(columns={'_id': 'time'})
runtime = round(time.time()-time_start, 2)
print(f' Done! Fetched {len(df)} documents in {runtime} s')
return df
def update_db(farm, update_df, upsert=True):
"""Update database via bulk write."""
if 'time' in update_df.columns:
update_df = update_df.rename(columns={'time': '_id'})
client = connect_db(MONGO_URI)
db = client['wpp']
ops = []
for i in range(len(update_df)):
_id = update_df.iloc[i]._id
data = update_df.iloc[i].to_dict()
ops.append(UpdateOne({'_id': _id}, {'$set': data}, upsert=upsert))
db[farm].bulk_write(ops)
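# Hedged usage sketch (not in the original module): pull the most recent
# documents for one farm, add a column and write them back. Requires the
# MONGO_URI environment variable; the extra column is illustrative only.
def _example_roundtrip(farm='BLUFF1', limit=100):
    client = connect_db(MONGO_URI)
    df = fetch_data(client, farm, limit)
    df['checked'] = True                    # illustrative flag column
    update_db(farm, df)                     # upserts on the time/_id field
    return df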
def fill_val(raw, offset):
"""Fill missing value with the mean of the -24h and +24h data.
offset is the number of rows corresponding to +/- 24 h: 24 for a 1-hour
interval, 288 for a 5-minute interval.
"""
df = raw.copy(deep=True)
for item in df.drop('time', axis=1).columns:
for i in df[df.isna().any(1)].index:
# Take into consideration if missing values don't have -24h and +24h data
try:
v_plus = df[item][i+offset]
except:
v_plus = np.nan
try:
v_minus = df[item][i-offset]
except:
v_minus = np.nan
# fill with the with the mean of the -24h and +24h data if they both exist
# otherwise, just fill with the one that exists
if not pd.isnull(v_plus) and not | pd.isnull(v_minus) | pandas.isnull |
import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, Index
import pandas._testing as tm
class TestMap:
@pytest.mark.parametrize(
"data, categories",
[
(list("abcbca"), list("cab")),
(pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
],
ids=["string", "interval"],
)
def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
def test_map(self):
ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(
list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(
list("ababc"), categories=list("bac"), ordered=False, name="XXX"
)
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(
ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
)
# change categories dtype
ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
def f(x):
return {"A": 10, "B": 20, "C": 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex(
[10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
)
tm.assert_index_equal(result, exp)
result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
tm.assert_index_equal(result, exp)
result = ci.map({"A": 10, "B": 20, "C": 30})
tm.assert_index_equal(result, exp)
def test_map_with_categorical_series(self):
# GH 12756
a = pd.Index([1, 2, 3, 4])
b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
c = pd.Series(["even", "odd", "even", "odd"])
exp = CategoricalIndex(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(b), exp)
exp = pd.Index(["odd", "even", "odd", np.nan])
tm.assert_index_equal(a.map(c), exp)
@pytest.mark.parametrize(
("data", "f"),
(
([1, 1, np.nan], pd.isna),
([1, 2, np.nan], pd.isna),
([1, 1, np.nan], {1: False}),
([1, 2, np.nan], {1: False, 2: False}),
([1, 1, np.nan], pd.Series([False, False])),
([1, 2, np.nan], | pd.Series([False, False, False]) | pandas.Series |
'''
Created on May 16, 2018
@author: cef
significant scripts for calculating damage within the ABMRI framework
for secondary data loader scripts, see fdmg.datos.py
'''
#===============================================================================
# IMPORT STANDARD MODS -------------------------------------------------------
#===============================================================================
import logging, os, time, re, math, copy, gc, weakref, random, sys
import pandas as pd
import numpy as np
import scipy.integrate
#===============================================================================
# shortcuts
#===============================================================================
from collections import OrderedDict
from hlpr.exceptions import Error
from weakref import WeakValueDictionary as wdict
from weakref import proxy
from model.sofda.hp.basic import OrderedSet
from model.sofda.hp.pd import view
idx = pd.IndexSlice
#===============================================================================
# IMPORT CUSTOM MODS ---------------------------------------------------------
#===============================================================================
#import hp.plot
import model.sofda.hp.basic as hp_basic
import model.sofda.hp.pd as hp_pd
import model.sofda.hp.oop as hp_oop
import model.sofda.hp.sim as hp_sim
import model.sofda.hp.data as hp_data
import model.sofda.hp.dyno as hp_dyno
import model.sofda.hp.sel as hp_sel
import model.sofda.fdmg.datos_fdmg as datos
#import matplotlib.pyplot as plt
#import matplotlib
#import matplotlib.animation as animation #load the animation module (with the new search path)
#===============================================================================
# custom shortcuts
#===============================================================================
from model.sofda.fdmg.house import House
#from model.sofda.fdmg.dfunc import Dfunc
from model.sofda.fdmg.dmgfeat import Dmg_feat
# logger setup -----------------------------------------------------------------------
mod_logger = logging.getLogger(__name__)
mod_logger.debug('initilized')
#===============================================================================
#module level defaults ------------------------------------------------------
#===============================================================================
#datapars_cols = [u'dataname', u'desc', u'datafile_tailpath', u'datatplate_tailpath', u'trim_row'] #headers in the data tab
datafile_types_list = ['.csv', '.xls']
class Fdmg( #flood damage model
hp_sel.Sel_controller, #no init
hp_dyno.Dyno_wrap, #add some empty containers
#hp.plot.Plot_o, #build the label
hp_sim.Sim_model, #Sim_wrap: attach the reset_d. Sim_model: inherit attributes
hp_oop.Trunk_o, #no init
#Parent_cmplx: attach empty kids_sd
#Parent: set some defaults
hp_oop.Child):
"""
#===========================================================================
# INPUTS
#===========================================================================
pars_path ==> pars_file.xls
main external parameter spreadsheet.
See description in file for each column
dataset parameters
tab = 'data'. expected columns: datapars_cols
session parameters
tab = 'gen'. expected rows: sessionpars_rows
"""
#===========================================================================
# program parameters
#===========================================================================
name = 'fdmg'
#list of attribute names to try and inherit from the session
try_inherit_anl = set(['ca_ltail', 'ca_rtail', 'mind', \
'dbg_fld_cnt', 'legacy_binv_f', 'gis_area_max', \
'fprob_mult', 'flood_tbl_nm', 'gpwr_aep', 'dmg_rat_f',\
'joist_space', 'G_anchor_ht', 'bsmt_opn_ht_code','bsmt_egrd_code', \
'damp_func_code', 'cont_val_scale', 'hse_skip_depth', \
'area_egrd00', 'area_egrd01', 'area_egrd02',
'fhr_nm', 'write_fdmg_sum', 'dfeat_xclud_price',
'write_fdmg_sum_fly',
])
fld_aep_spcl = 100 #special flood to try and include in db runs
bsmt_egrd = 'wet' #default value for bsmt_egrd
legacy_binv_f = True #flag to indicate that the binv is in legacy format (use indices rather than column labels)
gis_area_max = 3500
acode_sec_d = dict() #available acodes with dfunc data loaded (to check against binv request) {acode:asector}
'consider allowing the user control of these'
gis_area_min = 5
gis_area_max = 5000
write_fdmg_sum_fly = False
write_dmg_fly_first = True #start off to signifiy first run
#===========================================================================
# debuggers
#===========================================================================
write_beg_hist = True #whether to write the beg history or not
beg_hist_df = None
#===========================================================================
# user provided values
#===========================================================================
#legacy pars
floor_ht = 0.0
mind = '' #column to match between data sets and name the house objects
#EAD calc
ca_ltail ='flat'
ca_rtail =2 #aep at which zero value is assumeed. 'none' uses lowest aep in flood set
#Floodo controllers
gpwr_aep = 100 #default max aep where gridpower_f = TRUE (when the power shuts off)
dbg_fld_cnt = '0' #for slicing the number of floods we want to evaluate
#area exposure
area_egrd00 = None
area_egrd01 = None
area_egrd02 = None
#Dfunc controllers
place_codes = None
dmg_types = None
flood_tbl_nm = None #name of the flood table to use
#timeline deltas
'just keeping this on the fdmg for simplicity; no need for flood level heterogeneity'
wsl_delta = 0.0
fprob_mult = 1.0 #needs to be a float for type matching
dmg_rat_f = False
#Fdmg.House pars
joist_space = 0.3
G_anchor_ht = 0.6
bsmt_egrd_code = 'plpm'
damp_func_code = 'seep'
bsmt_opn_ht_code = '*min(2.0)'
hse_skip_depth = -4 #depth to skip house damage calc
fhr_nm = ''
cont_val_scale = .25
write_fdmg_sum = True
dfeat_xclud_price = 0.0
#===========================================================================
# calculation parameters
#===========================================================================
res_fancy = None
gpwr_f = True #placeholder for __init__ calcs
fld_aep_l = None
dmg_dx_base = None #results frame for writing
plotr_d = None #dictionary of EAD plot workers
dfeats_d = dict() #{tag:dfeats}. see raise_all_dfeats()
fld_pwr_cnt = 0
seq = 0
#damage results/stats
dmgs_df = None
dmgs_df_wtail = None #damage summaries with damages for the tail logic included
ead_tot = 0
dmg_tot = 0
#===========================================================================
# calculation data holders
#===========================================================================
dmg_dx = None #container for full run results
bdry_cnt = 0
bwet_cnt = 0
bdamp_cnt = 0
def __init__(self,*vars, **kwargs):
logger = mod_logger.getChild('Fdmg')
#=======================================================================
# initialize cascade
#=======================================================================
super(Fdmg, self).__init__(*vars, **kwargs) #initialize the base class
#=======================================================================
# object updates
#=======================================================================
self.reset_d.update({'ead_tot':0, 'dmgs_df':None, 'dmg_dx':None,\
'wsl_delta':0}) #update the rest attributes
#=======================================================================
# defaults
#=======================================================================
if not self.session._write_data:
self.write_fdmg_sum = False
if not self.dbg_fld_cnt == 'all':
self.dbg_fld_cnt = int(float(self.dbg_fld_cnt))
#=======================================================================
# pre checks
#=======================================================================
if self.db_f:
#model assignment
if not self.model.__repr__() == self.__repr__():
raise IOError
#check we have all the datos we want
dname_exp = np.array(('rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl'))
boolar = np.invert(np.isin(dname_exp, self.session.pars_df_d['datos']))
if np.any(boolar):
"""allowing this?"""
logger.warning('missing %i expected datos: %s'%(boolar.sum(), dname_exp[boolar]))
#=======================================================================
#setup functions
#=======================================================================
#par cleaners/ special loaders
logger.debug("load_hse_geo() \n")
self.load_hse_geo()
logger.info('load and clean dfunc data \n')
self.load_pars_dfunc(self.session.pars_df_d['dfunc']) #load the data functions to damage type table
logger.debug('\n')
self.setup_dmg_dx_cols()
logger.debug('load_submodels() \n')
self.load_submodels()
logger.debug('init_dyno() \n')
self.init_dyno()
#outputting setup
if self.write_fdmg_sum_fly:
self.fly_res_fpath = os.path.join(self.session.outpath, '%s fdmg_res_fly.csv'%self.session.tag)
logger.info('Fdmg model initialized as \'%s\' \n'%(self.name))
return
#===========================================================================
# def xxxcheck_pars(self): #check your data pars
# #pull the datas frame
# df_raw = self.session.pars_df_d['datos']
#
# #=======================================================================
# # check mandatory data objects
# #=======================================================================
# if not 'binv' in df_raw['name'].tolist():
# raise Error('missing \'binv\'!')
#
# #=======================================================================
# # check optional data objects
# #=======================================================================
# fdmg_tab_nl = ['rfda_curve', 'binv','dfeat_tbl', 'fhr_tbl']
# boolidx = df_raw['name'].isin(fdmg_tab_nl)
#
# if not np.all(boolidx):
# raise IOError #passed some unexpected data names
#
# return
#===========================================================================
def load_submodels(self):
logger = self.logger.getChild('load_submodels')
self.state = 'load'
#=======================================================================
# data objects
#=======================================================================
'this is the main loader that builds all the children as specified on the data tab'
logger.info('loading dat objects from \'fdmg\' tab')
logger.debug('\n \n')
#build datos from teh data tab
'todo: hard code these class types (rather than reading from the control file)'
self.fdmgo_d = self.raise_children_df(self.session.pars_df_d['datos'], #df to raise on
kid_class = None) #should raise according to df entry
self.session.prof(state='load.fdmg.datos')
'WARNING: fdmgo_d is not set until after ALL the children on this tab are raised'
#attach special children
self.binv = self.fdmgo_d['binv']
"""NO! this wont hold resetting updates
self.binv_df = self.binv.childmeta_df"""
#=======================================================================
# flood tables
#=======================================================================
self.ftblos_d = self.raise_children_df(self.session.pars_df_d['flood_tbls'], #df to raise on
kid_class = datos.Flood_tbl) #should raise according to df entry
#make sure the one we are looking for is in there
if not self.session.flood_tbl_nm in list(self.ftblos_d.keys()):
raise Error('requested flood table name \'%s\' not found in loaded sets'%self.session.flood_tbl_nm)
'initial call which only updates the binv_df'
self.set_area_prot_lvl()
if 'fhr_tbl' in list(self.fdmgo_d.keys()):
self.set_fhr()
#=======================================================================
# dfeats
#======================================================================
if self.session.load_dfeats_first_f & self.session.wdfeats_f:
logger.debug('raise_all_dfeats() \n')
self.dfeats_d = self.fdmgo_d['dfeat_tbl'].raise_all_dfeats()
#=======================================================================
# raise houses
#=======================================================================
#check we have all the acodes
self.check_acodes()
logger.info('raising houses')
logger.debug('\n')
self.binv.raise_houses()
self.session.prof(state='load.fdmg.houses')
'calling this here so all of the other datos are raised'
#self.rfda_curve = self.fdmgo_d['rfda_curve']
"""No! we need to get this in before the binv.reset_d['childmeta_df'] is set
self.set_area_prot_lvl() #apply the area protection from the named flood table"""
logger.info('loading floods')
logger.debug('\n \n')
self.load_floods()
self.session.prof(state='load.fdmg.floods')
logger.debug("finished with %i kids\n"%len(self.kids_d))
return
def setup_dmg_dx_cols(self): #get the columns to use for fdmg results
"""
This is setup to generate a unique set of ordered column names with this logic
take the damage types
add mandatory fields
add user provided fields
"""
logger = self.logger.getChild('setup_dmg_dx_cols')
#=======================================================================
#build the basic list of column headers
#=======================================================================
#damage types at the head
col_os = OrderedSet(self.dmg_types) #put
#basic add ons
_ = col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el'])
#=======================================================================
# special logic
#=======================================================================
if self.dmg_rat_f:
for dmg_type in self.dmg_types:
_ = col_os.add('%s_rat'%dmg_type)
if not self.wsl_delta==0:
col_os.add('wsl_raw')
"""This doesnt handle runs where we start with a delta of zero and then add some later
for these, you need to expplicitly call 'wsl_raw' in the dmg_xtra_cols_fat"""
#ground water damage
if 'dmg_gw' in self.session.outpars_d['Flood']:
col_os.add('gw_f')
#add the dem if necessary
if 'gw_f' in col_os:
col_os.add('dem_el')
#=======================================================================
# set pars based on user provided
#=======================================================================
#s = self.session.outpars_d[self.__class__.__name__]
#extra columns for damage resulst frame
if self.db_f or self.session.write_fdmg_fancy:
logger.debug('including extra columns in outputs')
#clean the extra cols
'todo: move this to a helper'
if hasattr(self.session, 'xtra_cols'):
try:
dc_l = eval(self.session.xtra_cols) #convert to a list
except:
logger.error('failed to convert \'xtra_cols\' to a list. check formatting')
raise IOError
else:
dc_l = ['wsl_raw', 'gis_area', 'acode_s', 'B_f_height', 'BS_ints','gw_f']
if not isinstance(dc_l, list): raise IOError
col_os.update(dc_l) #add these
self.dmg_df_cols = col_os
logger.debug('set dmg_df_cols as: %s'%self.dmg_df_cols)
return
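def _example_dmg_dx_cols(self): #hedged illustration only, not part of the original SOFDA model
    """Sketch of the column-ordering logic in setup_dmg_dx_cols() with literal
    values; the damage-type names used here are assumptions for illustration."""
    col_os = OrderedSet(['BS', 'BC', 'GS'])                                #damage types first
    col_os.update(['total', 'hse_depth', 'wsl', 'bsmt_egrd', 'anchor_el']) #mandatory add-ons
    return list(col_os)                                                    #unique, order preserved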
def load_pars_dfunc(self,
df_raw=None): #build a df from the dfunc tab
"""
20190512: upgraded to handle nores and mres types
"""
#=======================================================================
# defaults
#=======================================================================
logger = self.logger.getChild('load_pars_dfunc')
#list of columns to expect
exp_colns = np.array(['acode','asector','place_code','dmg_code','dfunc_type','anchor_ht_code'])
if df_raw is None:
df_raw = self.session.pars_df_d['dfunc']
logger.debug('from df %s: \n %s'%(str(df_raw.shape), df_raw))
#=======================================================================
# clean
#=======================================================================
df1 = df_raw.dropna(axis='columns', how='all').dropna(axis='index', how='all') #drop rows with all na
df1 = df1.drop(columns=['note', 'rank'], errors='ignore') #drop some columns we dont need
#=======================================================================
# checking
#=======================================================================
#expected columns
boolar = np.invert(np.isin(exp_colns, df1.columns))
if np.any(boolar):
raise Error('missing %i expected columns\n %s'%(boolar.sum(), exp_colns[boolar]))
#rfda garage logic
boolidx = np.logical_and(df1['place_code'] == 'G', df1['dfunc_type'] == 'rfda')
if np.any(boolidx):
raise Error('got dfunc_type = rfda for a garage curve (no such thing)')
#=======================================================================
# calculated columns
#=======================================================================
df2 = df1.copy()
df2['dmg_type'] = df2['place_code'] + df2['dmg_code']
"""as acode whill change, we want to keep the name static
df2['name'] = df2['acode'] + df2['dmg_type']"""
df2['name'] = df2['dmg_type']
#=======================================================================
# data loading
#=======================================================================
if 'tailpath' in df2.columns:
boolidx = ~ | pd.isnull(df2['tailpath']) | pandas.isnull |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scikitplot
from sklearn.metrics import classification_report
from sklearn.utils import class_weight
import argparse
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Conv2D, Flatten
from tensorflow.keras.layers import MaxPooling2D, Dropout, BatchNormalization, GlobalMaxPooling2D, SeparableConv2D
from tensorflow.keras.optimizers import Adam, Nadam
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.losses import BinaryCrossentropy, CategoricalCrossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# command line argument
ap = argparse.ArgumentParser()
ap.add_argument("--size", type=int, help="image size", default=224)
ap.add_argument("--classes", type=int, help="emotion classes", default=7)
ap.add_argument("--epochs", type=int, default=20)
size = ap.parse_args().size
classes = ap.parse_args().classes
epochs = ap.parse_args().epochs
# Load AffectNet Data from 'data/AffectNet'
def prepare_dataset(num_class):
print('Dataset loading')
batch_size = 64
train_datagen = ImageDataGenerator(
rescale=1. / 255,
rotation_range=15,
width_shift_range=0.15,
height_shift_range=0.15,
shear_range=0.15,
zoom_range=0.15,
horizontal_flip=True
)
val_datagen = ImageDataGenerator(rescale=1. / 255)
df_train = pd.read_pickle('df_train.pkl')
df_val = pd.read_pickle('df_val.pkl')
if num_class == 5:
df_train = df_train[
(df_train['y_col'] != 'Contempt') & (df_train['y_col'] != 'Disgust') & (df_train['y_col'] != 'Fear')]
df_val = df_val[
(df_val['y_col'] != 'Contempt') & (df_val['y_col'] != 'Disgust') & (df_val['y_col'] != 'Fear')]
elif num_class == 7:
df_train = df_train[(df_train['y_col'] != 'Contempt')]
df_val = df_val[(df_val['y_col'] != 'Contempt')]
df_train = df_train.drop(df_train[df_train['y_col'] == 'Neutral'].sample(50000).index)
df_train = df_train.drop(df_train[df_train['y_col'] == 'Happiness'].sample(100000).index)
train_generator = train_datagen.flow_from_dataframe(
df_train,
x_col='x_col',
y_col='y_col',
target_size=(size, size),
batch_size=batch_size,
color_mode="rgb",
class_mode='categorical'
)
val_generator = val_datagen.flow_from_dataframe(
df_val,
x_col='x_col',
y_col='y_col',
shuffle=False,
target_size=(size, size),
batch_size=batch_size,
color_mode="rgb",
class_mode='categorical'
)
class_weights = dict(enumerate(
class_weight.compute_class_weight(class_weight='balanced', classes=np.unique(df_train['y_col']),
y=df_train['y_col'])))
num_train = len(df_train)
num_val = len(df_val)
dataset_lengths = {'train': num_train, 'val': num_val}
return train_generator, val_generator, class_weights, dataset_lengths
# Create the model
def get_model():
print('Creating recognizer model')
input_shape = (size, size, 3)
weight_decay = 0.01
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape,
kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu',
kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu',
kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), activation='relu',
kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, kernel_size=(3, 3), activation='relu',
kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(Dropout(0.4))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu', kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', kernel_regularizer=l1_l2(0, weight_decay),
bias_regularizer=l1_l2(0, weight_decay)))
model.add(BatchNormalization())
model.add(Dense(classes, activation='softmax'))
model.summary()
return model
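# Hedged training sketch (the original training/main code is not shown here):
# wiring prepare_dataset() and get_model() together. The optimizer settings and
# the steps-per-epoch arithmetic are assumptions, not taken from this file.
def _example_train():
    train_gen, val_gen, class_weights, lengths = prepare_dataset(classes)
    model = get_model()
    model.compile(optimizer=Adam(learning_rate=1e-3),
                  loss=CategoricalCrossentropy(),
                  metrics=['accuracy'])
    history = model.fit(train_gen,
                        validation_data=val_gen,
                        epochs=epochs,
                        class_weight=class_weights,
                        steps_per_epoch=lengths['train'] // 64,
                        validation_steps=lengths['val'] // 64)
    return model, history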
# Save results of training in root folder
def save_results(model_type, model_history, val_gen):
# save progress and charts
print('Saving results')
epoch = model_history.epoch
accuracy = model_history.history['accuracy']
val_accuracy = model_history.history['val_accuracy']
loss = model_history.history['loss']
val_loss = model_history.history['val_loss']
model_type.save_weights('model.h5')
sns.set()
fig = plt.figure(0, (12, 4))
ax = plt.subplot(1, 2, 1)
sns.lineplot(x=epoch, y=accuracy, label='train')
sns.lineplot(x=epoch, y=val_accuracy, label='valid')
plt.title('Accuracy')
plt.tight_layout()
ax = plt.subplot(1, 2, 2)
sns.lineplot(x=epoch, y=loss, label='train')
sns.lineplot(x=epoch, y=val_loss, label='valid')
plt.title('Loss')
plt.tight_layout()
plt.savefig('epoch_history.png')
plt.close(fig)
# plot performance distribution
df_accu = pd.DataFrame({'train': accuracy, 'valid': val_accuracy})
df_loss = pd.DataFrame({'train': loss, 'valid': val_loss})
dist_fig = plt.figure(1, (14, 4))
ax = plt.subplot(1, 2, 1)
sns.violinplot(x="variable", y="value", data=pd.melt(df_accu), showfliers=False)
plt.title('Accuracy')
plt.tight_layout()
ax = plt.subplot(1, 2, 2)
sns.violinplot(x="variable", y="value", data= | pd.melt(df_loss) | pandas.melt |
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import pandas as pd
import subprocess
import pydot
import numpy as np
from os.path import join
from sklearn.tree import export_graphviz
from io import StringIO
def plot_blocks(data, init_day,translator, block_info=None, end_day=None, to_file=False, output_path=None):
if end_day == None:
end_day = init_day
# Check if it is only one day
if init_day == end_day:
one_day = True
else:
one_day = False
# Convert init_day and end_day to datetime.date
if not isinstance(init_day, datetime.date):
if isinstance(init_day, datetime.datetime):
init_day = init_day.date()
else:
raise TypeError("init_day must be a datetime object")
if not isinstance(end_day, datetime.date):
if isinstance(end_day, datetime.datetime):
end_day = end_day.date()
else:
raise TypeError("end_day must be a datetime object")
# Get sample from init_datetime to end_datetime
auto_gluc_blocks_sample = data[(data["Day_Block"] >= init_day) & (data["Day_Block"] <= end_day)]
if block_info is not None:
block_info_sample = block_info[(block_info["Day_Block"] >= init_day) & (block_info["Day_Block"] <= end_day)]
# Smooth glucose data
smoothed_sample = smooth_plot(auto_gluc_blocks_sample)
# Generate figure
fig, ax = plt.subplots()
labels = []
for key, grp in smoothed_sample.groupby(['Block', 'Day_Block']):
grp_axis = smoothed_sample[smoothed_sample['Day_Block'] == key[1]]
grp_axis.loc[grp_axis['Block'] != key[0], "Glucose_Auto"] = np.nan
if one_day:
label = "{} {:d}".format(translator.translate_to_language(["Block"])[0], key[0])
else:
label = "{} {:d} ({:%d/%m}) ".format(translator.translate_to_language(["Block"])[0], key[0], key[1])
ax = grp_axis.plot(ax=ax, kind='line', x="Datetime", y="Glucose_Auto", label=label)
if block_info is not None:
for i, dt in enumerate(block_info_sample["Block_Meal"]):
if not pd.isnull(dt):
plt.axvline(dt, color='grey', linestyle='--', label='Carbo.' if i == 0 else "")
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if one_day:
#ax.legend(loc='best')
ax.legend(loc='center left', bbox_to_anchor = (1, 0.5))
else:
ax.legend()
#Set figure size
fig.set_facecolor("white")
fig.set_size_inches(12, 5, forward=True)
#Export figure
if to_file:
if output_path is not None:
path = join(output_path, "{}.png".format(init_day.strftime("%d-%m-%y")))
else:
path = "{}.png".format(init_day.strftime("%d-%m-%y"))
plt.savefig(path)
plt.close()
return path
else:
plt.show()
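# Hedged usage sketch (not in the original module): plotting one day of block
# data to a PNG. The data/block_info frames and the translator object are
# assumed to come from the rest of the pipeline and are not built here.
def _example_plot_one_day(data, translator, block_info=None):
    day = datetime.date(2017, 5, 1)          # illustrative date
    return plot_blocks(data, day, translator,
                       block_info=block_info,
                       to_file=True, output_path='.')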
def smooth_plot(data):
# Level of granularity of time axis (Time difference between points)
interval_min = 1
# Define bounds of the plot
min_time = data["Datetime"].min()
max_time = data["Datetime"].max()
difference = (max_time - min_time + datetime.timedelta(minutes=1))
min_diff = (difference.days * 24 * 60) + (difference.seconds / 60)
smoothed_data = pd.DataFrame((min_time + datetime.timedelta(minutes=x * interval_min)
for x in range(0, int((min_diff / interval_min)))), columns=["Datetime"])
smoothed_data = | pd.merge(smoothed_data, data, how='left', on="Datetime") | pandas.merge |
from atmPy.aerosols.instruments import POPS
import icarus
import pathlib
import numpy as np
import xarray as xr
import pandas as pd
from ipywidgets import widgets
from IPython.display import display
import matplotlib.pylab as plt
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
from nsasci import database
import sqlite3
def read_POPS(path):
# print(path.glob('*'))
hk = POPS.read_housekeeping(path, pattern = 'hk', skip_histogram= True)
hk.get_altitude()
return hk
def read_iMet(path):
ds = xr.open_dataset(path)
# return ds
alt_gps = ds['GPS altitude [km]'].to_pandas()
alt_bar = ds['altitude (from iMet PTU) [km]'].to_pandas()
df = pd.DataFrame({'alt_gps': alt_gps,
'alt_bar': alt_bar})
df *= 1000
return df
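# Hedged usage sketch (not part of the original module): reading one iMet file
# and one POPS housekeeping folder side by side; both paths are placeholders
# and the returned objects are simply handed back for inspection.
def _example_load_flight(imet_path, pops_path):
    alt = read_iMet(imet_path)       # GPS and barometric altitude in metres
    hk = read_POPS(pops_path)        # POPS housekeeping with altitude attached
    return alt, hk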
class Data_container(object):
def __init__(self, controller, path2data1, path2data2 = None):
self.controller = controller
self.delta_t = 0
path2data1 = pathlib.Path(path2data1)
read_data = read_iMet #icarus.icarus_lab.read_imet
self.dataset1 = Data(self, path2data1, read_data, glob_pattern = 'oli*')
if not isinstance(path2data2, type(None)):
path2data2 = pathlib.Path(path2data2)
read_data = read_POPS
self.dataset2 = Data(self, path2data2, read_data, glob_pattern='olitbspops*')
else:
self.dataset2 = None
class Data(object):
def __init__(self, datacontainer, path2data, read_data, load_all = True , glob_pattern = '*'):
self.controller = datacontainer.controller
self._datacontainer = datacontainer
self.read_data = read_data
self.path2data = path2data
self._path2active = None
self.path2data_list = sorted(list(path2data.glob(glob_pattern)))
if load_all:
self.load_all()
self._load_all = load_all
self.path2active = self.path2data_list[0]
def load_all(self):
data_list = []
data_info_list = []
for path in self.path2data_list:
df = self.read_data(path)
add_on = path.name.split('.')[-2][-2:]
df.columns = ['_'.join([col, add_on]) for col in df.columns]
data_list.append(df)
dffi = dict(t_start=df.index.min(),
t_end=df.index.max(),
v_max=df.max().max(),
v_min=df.min().min())
data_info_list.append( | pd.DataFrame(dffi, index=[path.name]) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.types import plant
from operational_analysis.methods import plant_analysis
from examples.operational_AEP_analysis.project_EIA import Project_EIA
class TestPandasPrufPlantAnalysis(unittest.TestCase):
def setUp(self):
np.random.seed(42)
# Set up data to use for testing (EIA example plant)
self.project = Project_EIA('./examples/operational_AEP_analysis/data')
self.project.prepare()
self.analysis = plant_analysis.MonteCarloAEP(self.project)
def test_validate_data(self):
self.assertTrue(self.project.validate(), "Failed to validate PlantData from schema")
def test_plant_analysis(self):
df = self.analysis._monthly.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy(df)
self.check_process_loss_estimates(df)
self.check_process_reanalysis_data(df)
# Check outlier filtering
self.check_filter_outliers()
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=2000, reanal_subset=['ncep2', 'merra2', 'erai'])
sim_results = self.analysis.results
self.check_simulation_results(sim_results)
def check_process_revenue_meter_energy(self, df):
# Energy Nan flags are all zero
nptest.assert_array_equal(df['energy_nan_perc'].as_matrix(), np.repeat(0.0, df.shape[0]))
# Expected number of days per month are equal to number of actual days
nptest.assert_array_equal(df['num_days_expected'], df['num_days_actual'])
# Check a few energy values
expected_gwh = pd.Series([6.765, 5.945907, 8.576])
actual_gwh = df.loc[pd.to_datetime(['2003-12-01', '2010-05-01', '2015-01-01']), 'energy_gwh']
nptest.assert_array_almost_equal(expected_gwh, actual_gwh)
def check_process_loss_estimates(self, df):
# Availablity, curtailment nan fields both 0, NaN flag is all False
nptest.assert_array_equal(df['avail_nan_perc'].as_matrix(), np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df['curt_nan_perc'].as_matrix(), np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df['nan_flag'].as_matrix(), np.repeat(False, df.shape[0]))
# Check a few reported availabilty and curtailment values
expected_avail_gwh = pd.Series([0.236601, 0.161961, 0.724330])
expected_curt_gwh = pd.Series([0.122979, 0.042608, 0.234614])
expected_avail_pct = pd.Series([0.033209, 0.026333, 0.075966])
expected_curt_pct = | pd.Series([0.017261, 0.006928, 0.024606]) | pandas.Series |
__author__ = 'heroico'
import os
import io
import json
import re
import logging
import gzip
from . import Exceptions
import pandas
import numpy
VALID_ALLELES = ["A", "T", "C", "G"]
def hapName(name):
return name + ".hap.gz"
def legendName(name):
return name + ".legend.gz"
def dosageName(name):
return name + ".dosage.gz"
def dosageNamesFromFolder(folder):
names = contentsWithRegexpFromFolder(folder, ".*.dosage.gz")
if not names:
names = contentsWithRegexpFromFolder(folder, ".*.dos.gz")
return names
def hapNamesFromFolder(folder):
names = namesWithPatternFromFolder(folder, ".hap.gz")
return names
def legendNamesFromFolder(folder):
names = namesWithPatternFromFolder(folder, ".legend.gz")
return names
def namesWithPatternFromFolder(folder, pattern):
contents = os.listdir(folder)
names = []
for content in contents:
if pattern in content:
name = content.split(pattern)[0]
names.append(name)
return names
def contentsWithPatternsFromFolder(folder, patterns):
try:
contents = os.listdir(folder)
paths = []
for content in contents:
matches = True
for pattern in patterns:
if not pattern in content:
matches = False
break
if matches:
paths.append(content)
except OSError:
raise Exceptions.BadDirectory(folder)
return paths
def contentsWithRegexpFromFolder(folder, regexp):
if type(regexp) == str:
regexp = re.compile(regexp)
contents = os.listdir(folder)
paths = [x for x in contents if regexp.match(x)] if regexp else contents
return paths
def target_files(input_folder, file_filters=None):
files = os.listdir(input_folder)
if file_filters:
patterns = [re.compile(x) for x in file_filters]
files = [x for x in files if all([r.match(x) for r in patterns])]
files = [os.path.join(input_folder, x) for x in files]
return files
def samplesInputPath(path):
samples_content = contentsWithPatternsFromFolder(path, [".sample"])
if len(samples_content) == 0:
samples_content = contentsWithPatternsFromFolder(path, ["samples.txt"])
samples_path = None
if len(samples_content) > 0:
samples_file = samples_content[0]
samples_path = os.path.join(path, samples_file)
return samples_path
def checkSubdirectorySanity(base, candidate):
sane = True
abs_base = os.path.realpath(os.path.abspath(base))
abs_candidate = os.path.realpath(os.path.abspath(candidate))
if abs_base == abs_candidate:
return False
rel_base_to_candidate = os.path.relpath(abs_base, abs_candidate)
if not rel_base_to_candidate.startswith(os.pardir):
return False
return sane
class PercentReporter(object):
def __init__(self, level, total, increment=10, pattern="%i percent complete"):
self.level = level
self.total = total
self.increment = increment
self.last_reported = 0
self.pattern = pattern
def update(self, i, text=None, force=False):
percent = int(i*100.0/self.total)
if force or percent >= self.last_reported + self.increment:
self.last_reported = percent
if not text:
text = self.pattern
logging.log(self.level, text, percent)
def load_json(path):
d = None
with open(path) as file:
d = json.load(file)
return d
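# Hedged usage sketch (not in the original module): how PercentReporter is
# typically driven inside a long loop; the total of 1000 is arbitrary.
def _example_percent_reporter(total=1000):
    reporter = PercentReporter(logging.INFO, total)
    for i in range(total):
        reporter.update(i, "processed %i percent of rows")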
class FileIterator(object):
def __init__(self, path, header=None, compressed = False, ignore_until_header = False):
self.path = path
self.compressed = compressed
self.header = header
self.ignore_until_header = ignore_until_header
if ignore_until_header and not header:
raise Exceptions.InvalidArguments("File iterator received conflicting header information")
def iterate(self,callback=None):
if self.compressed:
with gzip.open(self.path, 'r') as file_object:
self._iterateOverFile(io.TextIOWrapper(file_object, newline=""), callback)
else:
with open(self.path, 'r') as file_object:
self._iterateOverFile(file_object, callback)
def _iterateOverFile(self, file_object, callback):
if self.ignore_until_header:
self._ignore_until_header(file_object)
else:
if self.header is not None:
line = file_object.readline().strip("\n")
if len(self.header) and line != self.header:
raise Exceptions.MalformedInputFile(self.path, "Unexpected header")
self._processFile(file_object, callback)
def _ignore_until_header(self, file_object):
if self.ignore_until_header and self.header:
skip = True
while skip:
l = file_object.readline()
if not l:
raise Exceptions.InvalidArguments("Wrong header lookup in %s" % (self.path,))
l = l.strip()
if self.header in l:
skip = False
def _processFile(self, file_object, callback):
if callback is not None:
for i,line in enumerate(file_object):
callback(i, line)
def open_any_plain_file(path):
if "gz" in path:
return io.TextIOWrapper(gzip.open(path), newline="")
else:
return open(path)
def generate_from_any_plain_file(path, skip_n=None):
is_gz = ".gz" in path
with open_any_plain_file(path) as f:
if skip_n:
for i in range(0, skip_n):
f.readline()
for l in f:
yield l
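# Hedged usage sketch (not in the original module): stream a possibly gzipped
# text file while skipping a one-line header; the path is a placeholder.
def _example_count_lines(path="input.txt.gz"):
    count = 0
    for _ in generate_from_any_plain_file(path, skip_n=1):
        count += 1
    return count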
import csv
class CSVFileIterator(FileIterator):
def __init__(self, path, header=None, compressed = False, ignore_until_header = False, delimiter = " ", quotechar='"'):
super(CSVFileIterator, self).__init__(path, header, compressed, ignore_until_header)
self.delimiter = delimiter
self.quotechar = quotechar
def _processFile(self, file_object, callback):
if callback is not None:
reader = csv.reader(file_object, delimiter=self.delimiter, quotechar=self.quotechar)
for i,row in enumerate(reader):
callback(i, row)
def TS(string):
"""placeholder for string translation"""
return string
def ensure_requisite_folders(path):
folder = os.path.split(path)[0]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
def to_dataframe(data, columns,to_numeric=None, fill_na=None):
data = list(zip(*data))
if to_numeric:
data = [ | pandas.to_numeric(x, errors=to_numeric) | pandas.to_numeric |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for core methods in the Data Commons Python Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pandas.util.testing import assert_series_equal, assert_frame_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
def post_request_mock(*args, **kwargs):
""" A mock POST requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json
req = kwargs['json']
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_property_labels.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_labels']:
if req['dcids'] == ['geoId/0649670']:
# Response for sending a single dcid to get_property_labels
out_arcs = ['containedInPlace', 'name', 'geoId', 'typeOf']
res_json = json.dumps({
'geoId/0649670': {
'inLabels': [],
'outLabels': out_arcs
}
})
return MockResponse({"payload": res_json}, 200)
elif req['dcids'] == ['State', 'County', 'City']:
# Response for sending multiple dcids to get_property_labels
in_arcs = ['typeOf']
out_arcs = ['name', 'provenance', 'subClassOf', 'typeOf', 'url']
res_json = json.dumps({
'City': {'inLabels': in_arcs, 'outLabels': out_arcs},
'County': {'inLabels': in_arcs, 'outLabels': out_arcs},
'State': {'inLabels': in_arcs, 'outLabels': out_arcs}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == ['dc/MadDcid']:
# Response for sending a dcid that doesn't exist to get_property_labels
res_json = json.dumps({
'dc/MadDcid': {
'inLabels': [],
'outLabels': []
}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == []:
# Response for sending no dcids to get_property_labels
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_property_values
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_values']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'containedInPlace'\
and req['value_type'] == 'Town':
# Response for sending a request for getting Towns containedInPlace of
# Santa Clara County and Montgomery County.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': 'Los Gatos',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
{
'dcid': 'geoId/0643294',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
}
],
'out': []
},
'geoId/24031': {
'in': [
{
'dcid': 'geoId/2462850',
'name': 'Poolesville',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'name':
# Response for sending a request for the name of multiple dcids.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': [
{
'value': 'Santa Clara County',
'provenanceId': 'dc/sm3m2w3',
},
]
},
'geoId/24031': {
'in': [],
'out': [
{
'value': 'Montgomery County',
'provenanceId': 'dc/sm3m2w3',
},
]
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'madProperty':
# Response for sending a request with a property that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': []
},
'geoId/24031': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']\
and req['property'] == 'containedInPlace':
# Response for sending a request with a single dcid that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
},
'dc/MadDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': {
'in': [],
'out': []
},
'dc/MadderDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == [] and req['property'] == 'containedInPlace':
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_triples
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_triples']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']:
# Response for sending a request with two valid dcids.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'geoId/24031': [
{
"subjectId": "geoId/24031",
"predicate": "name",
"objectValue": "Montgomery County"
},
{
"subjectId": "geoId/2467675",
"subjectName": "Rockville",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/24031",
"objectName": "Montgomery County"
},
{
"subjectId": "geoId/24031",
"predicate": "containedInPlace",
"objectId": "geoId/24",
"objectName": "Maryland"
},
]
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response for sending a request where one dcid does not exist.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'dc/MadDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': [],
'dc/MadderDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == []:
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
class TestGetPropertyLabels(unittest.TestCase):
""" Unit tests for get_property_labels. """
@mock.patch('requests.post', side_effect=post_request_mock)
def test_single_dcid(self, post_mock):
""" Calling get_property_labels with a single dcid returns a valid
result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['geoId/0649670'])
self.assertDictEqual(out_props,
{'geoId/0649670': ["containedInPlace", "name", "geoId", "typeOf"]})
# Test with out=False
in_props = dc.get_property_labels(['geoId/0649670'], out=False)
self.assertDictEqual(in_props, {'geoId/0649670': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_labels returns valid results with multiple
dcids.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['State', 'County', 'City']
expected_in = ["typeOf"]
expected_out = ["name", "provenance", "subClassOf", "typeOf", "url"]
# Test for outgoing property labels
out_props = dc.get_property_labels(dcids)
self.assertDictEqual(out_props, {
'State': expected_out,
'County': expected_out,
'City': expected_out,
})
# Test for incoming property labels
in_props = dc.get_property_labels(dcids, out=False)
self.assertDictEqual(in_props, {
'State': expected_in,
'County': expected_in,
'City': expected_in,
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_labels with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['dc/MadDcid'])
self.assertDictEqual(out_props, {'dc/MadDcid': []})
# Test for incoming property labels
in_props = dc.get_property_labels(['dc/MadDcid'], out=False)
self.assertDictEqual(in_props, {'dc/MadDcid': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_labels with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels([])
self.assertDictEqual(out_props, {})
# Test for incoming property labels
in_props = dc.get_property_labels([], out=False)
self.assertDictEqual(in_props, {})
class TestGetPropertyValues(unittest.TestCase):
""" Unit tests for get_property_values. """
# --------------------------- STANDARD UNIT TESTS ---------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_values with multiple dcids returns valid
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['geoId/06085', 'geoId/24031']
# Get the containedInPlace Towns for Santa Clara and Montgomery County.
towns = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
self.assertDictEqual(towns, {
'geoId/06085': ['geoId/0643294', 'geoId/0644112'],
'geoId/24031': ['geoId/2462850']
})
# Get the name of Santa Clara and Montgomery County.
names = dc.get_property_values(dcids, 'name')
self.assertDictEqual(names, {
'geoId/06085': ['Santa Clara County'],
'geoId/24031': ['Montgomery County']
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_values with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
bad_dcids_1 = ['geoId/06085', 'dc/MadDcid']
bad_dcids_2 = ['dc/MadDcid', 'dc/MadderDcid']
# Get entities containedInPlace of Santa Clara County and a dcid that does
# not exist.
contained_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
self.assertDictEqual(contained_1, {
'geoId/06085': ['geoId/0644112'],
'dc/MadDcid': []
})
# Get entities containedInPlace for two dcids that do not exist.
contained_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace')
self.assertDictEqual(contained_2, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_property(self, post_mock):
""" Calling get_property_values with a property that does not exist returns
empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values for a property that does not exist.
prop_vals = dc.get_property_values(
['geoId/06085', 'geoId/24031'], 'madProperty')
self.assertDictEqual(prop_vals, {
'geoId/06085': [],
'geoId/24031': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_values with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values with an empty list of dcids.
prop_vals = dc.get_property_values([], 'containedInPlace')
self.assertDictEqual(prop_vals, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series(self, post_mock):
""" Calling get_property_values with a Pandas Series returns the correct
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series.
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([
['geoId/0643294', 'geoId/0644112'],
['geoId/2462850']
])
# Call get_property_values with the series as input
actual = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
| assert_series_equal(actual, expected) | pandas.util.testing.assert_series_equal |
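# --- Editor's illustrative sketch (not part of the original source) ---
# The tests above stub out HTTP traffic by patching requests.post with a
# side_effect function that returns a minimal response object. This is a
# stripped-down, self-contained version of that pattern; the URL and payload
# below are invented for illustration.
import json as _json
from unittest import mock as _mock

class _FakeResponse:
    def __init__(self, json_data, status_code):
        self.json_data = json_data
        self.status_code = status_code

    def json(self):
        return self.json_data

def _fake_post(*args, **kwargs):
    # Route on the URL, just like post_request_mock above.
    if args and args[0].endswith("/get_property_labels"):
        payload = _json.dumps({"X": {"inLabels": [], "outLabels": ["name"]}})
        return _FakeResponse({"payload": payload}, 200)
    return _FakeResponse({}, 404)

@_mock.patch("requests.post", side_effect=_fake_post)
def _demo_mocked_post(post_mock):
    import requests
    resp = requests.post("https://example.invalid/get_property_labels", json={}, headers={})
    assert resp.status_code == 200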
"""
Desafio 2
Escreva uma classe utilizando a linguagem python que faça a conexão com o banco de dados Postgres,
utilizando a biblioteca Pandas:
a) Criar uma tabela;
b) Inserir uma linha contendo uma coluna indexada, uma coluna texto, uma coluna numérica,
uma coluna boolena e uma coluna datetime;
c) Faça o versionamento no github ou gitlab;
d) Crie uma branch no repositório chamado texte;
e) Nesta branch no repositório.
"""
import pandas as pd
import psycopg2
class ConectaTabelaDB:  # class to connect to Postgres and manipulate tables
# Connect to the database:
def __init__(self):
self.con = psycopg2.connect(host='localhost',
database='desafiogt2',
user='postgres',
password='<PASSWORD>')
self.cur = self.con.cursor()
# Method to create a table:
def criatabela(self, tab):
sql = f'CREATE TABLE {tab} (' \
f'id serial NOT NULL,' \
f'texto varchar(50),' \
f'numero int,' \
f'opcao bit,' \
f'data date,' \
f'PRIMARY KEY (id));'
self.cur.execute(sql)
self.con.commit()
self.con.close()
# Method to drop a table:
def apagatabela(self, tab):
sql = f'DROP TABLE IF EXISTS {tab};'
self.cur.execute(sql)
self.con.commit()
self.con.close()
# Method to insert data:
def inseredados(self, tab, text, num, op, dat):
sql = f"INSERT INTO {tab} (texto, numero, opcao, data) VALUES " \
f"('{text}', '{num}', '{op}', '{dat}');"
self.cur.execute(sql)
self.con.commit()
self.con.close()
# Method to read table data with Pandas:
def le_tabela(self, a):
self.cur = self.con.cursor()
self.cur.execute(f'SELECT * FROM {a};')
recset = self.cur.fetchall()
registros = []
for rec in recset:
registros.append(rec)
self.con.close()
df_bd = | pd.DataFrame(registros, columns=['id', 'texto', 'numero', 'opcao', 'data']) | pandas.DataFrame |
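# --- Editor's illustrative sketch (not part of the original source) ---
# How the ConectaTabelaDB class above might be exercised end to end. Each method
# closes the connection, so a fresh instance is created per call. The table name
# and sample values are invented, a reachable Postgres with valid credentials is
# assumed, and le_tabela is assumed to return the DataFrame it builds (its return
# statement is cut off above).
from datetime import date as _date

def _demo_conecta_tabela_db():
    ConectaTabelaDB().criatabela("desafio")
    ConectaTabelaDB().inseredados("desafio", "linha de teste", 42, 1, _date.today())
    df = ConectaTabelaDB().le_tabela("desafio")
    print(df)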
import json
from collections import OrderedDict
from itertools import repeat
from pathlib import Path
import pandas as pd
import torch
ROOT_PATH = Path(__file__).absolute().resolve().parent.parent.parent
def ensure_dir(dir_name):
dir_name = Path(dir_name)
if not dir_name.is_dir():
dir_name.mkdir(parents=True, exist_ok=False)
def read_json(file_name):
file_name = Path(file_name)
with file_name.open("rt") as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json(content, file_name):
file_name = Path(file_name)
with file_name.open("wt") as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def inf_loop(dataloader):
"""
Wrapper function for endless dataloader.
"""
for loader in repeat(dataloader):
yield from loader
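# --- Editor's illustrative sketch (not part of the original source) ---
# inf_loop turns a finite iterable (typically a DataLoader) into an endless
# stream, which suits iteration-based rather than epoch-based training loops.
# The plain list below stands in for a real DataLoader.
def _demo_inf_loop():
    loader = [1, 2, 3]
    stream = inf_loop(loader)
    return [next(stream) for _ in range(7)]  # [1, 2, 3, 1, 2, 3, 1]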
def prepare_device(n_gpu_use):
"""
Setup GPU device if available. Get gpu device indices which are used for DataParallel.
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
print("Warning: There's no GPU available on this machine, "
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
print(f"Warning: The number of GPU's configured to use is {n_gpu_use}, but only {n_gpu} are "
"available on this machine.")
n_gpu_use = n_gpu
print(f"Training will be performed on {n_gpu_use} GPUs.")
device = torch.device("cuda:0" if n_gpu_use > 0 else "cpu")
list_ids = list(range(n_gpu_use))
return device, list_ids
class MetricTracker:
def __init__(self, *keys, writer=None):
self.writer = writer
self.data = | pd.DataFrame(index=keys, columns=["total", "counts", "average"]) | pandas.DataFrame |
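# --- Editor's illustrative sketch (not part of the original source) ---
# The MetricTracker above is cut off right after its DataFrame is created. A
# typical completion keeps running totals and counts per key and exposes running
# averages; the standalone class below is an assumption-laden stand-in written by
# the editor, not the original implementation.
import pandas as pd

class _SketchMetricTracker:
    def __init__(self, *keys):
        self._data = pd.DataFrame(0.0, index=keys, columns=["total", "counts", "average"])

    def update(self, key, value, n=1):
        self._data.loc[key, "total"] += value * n
        self._data.loc[key, "counts"] += n
        self._data.loc[key, "average"] = (
            self._data.loc[key, "total"] / self._data.loc[key, "counts"]
        )

    def avg(self, key):
        return self._data.loc[key, "average"]

    def result(self):
        return dict(self._data["average"])

# tracker = _SketchMetricTracker("loss", "accuracy"); tracker.update("loss", 0.7)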
import pandas as pd
import re
from datetime import date
import config as config
class MainInsert:
def __init__(self):
self.camp=config.Config.CAMP
self.festival=config.Config.FESTIVAL
self.tour=config.Config.TOUR
self.camp_details=config.Config.CAMP_DETAILS
self.sigungu=config.Config.SIGUNGU
# Preprocess campsite, festival, and tour data
def camping_data(self):
camp = self.camp.drop(['allar', 'siteMg1Co', 'siteMg1Vrticl', 'siteMg1Width', 'siteMg2Co', 'siteMg2Vrticl',
'siteMg2Width', 'siteMg3Co', 'siteMg3Vrticl', 'siteMg3Width', 'zipcode', 'resveCl', 'resveUrl',
'intro', 'direction', 'featureNm', 'hvofBgnde', 'hvofEnddle', 'tooltip'], 1)
festival = self.festival.drop(['areacode', 'cat1', 'cat2', 'cat3', 'contenttypeid', 'mlevel'], 1)
tour = self.tour.drop(['areacode', 'booktour', 'cat1', 'cat2', 'cat3', 'contenttypeid', 'mlevel',
'zipcode'], 1)
camp = camp.rename(columns={'addr1' : 'addr',
'animalCmgCl' : 'animal_cmg',
'autoSiteCo' : 'auto_site',
'brazierCl' : 'brazier',
'caravAcmpnyAt' : 'carav_acmpny',
'caravSiteCo' : 'carav_site',
'clturEventAt' : 'clturevent_at',
'contentId' : 'content_id',
'createdtime' : 'created_date',
'exprnProgrmAt' : 'exprnprogrm_at',
'extshrCo' : 'extshr',
'facltNm' : 'place_name',
'fireSensorCo' : 'firesensor',
'frprvtSandCo' : 'frprvtsand',
'frprvtWrppCo' : 'frprvtwrpp',
'glampSiteCo' : 'glamp_site',
'gnrlSiteCo' : 'gnrl_site',
'induty' : 'industry',
'indvdlCaravSiteCo' : 'indvdlcarav_site',
'insrncAt' : 'insrnc_at',
'manageNmpr' : 'manage_num',
'manageSttus' : 'manage_sttus',
'mangeDivNm' : 'mange',
'mapX' : 'lat',
'mapY' : 'lng',
'modifiedtime' : 'modified_date',
'operDeCl' : 'oper_date',
'operPdCl' : 'oper_pd',
'prmisnDe' : 'prmisn_date',
'siteBottomCl1' : 'site_bottom1',
'siteBottomCl2' : 'site_bottom2',
'siteBottomCl3' : 'site_bottom3',
'siteBottomCl4' : 'site_bottom4',
'siteBottomCl5' : 'site_bottom5',
'sitedStnc' : 'sited_stnc',
'swrmCo' : 'swrm_cnt',
'toiletCo' : 'toilet_cnt',
'trlerAcmpnyAt' : 'trler_acmpny',
'wtrplCo' : 'wtrpl_cnt',
'clturEvent' : 'clturevent',
'eqpmnLendCl' : 'eqpmn_lend',
'firstImageUrl' : 'first_image',
'posblFcltyCl' : 'posblfclty',
'posblFcltyEtc' : 'posblfclty_etc',
'sbrsCl' : 'sbrs',
'sbrsEtc' : 'sbrs_etc',
'themaEnvrnCl' : 'thema_envrn',
'tourEraCl' : 'tour_era',
'lctCl' : 'lct',
'facltDivNm' : 'faclt_div',
'lineIntro' : 'line_intro',
'trsagntNo' : 'trsagnt_no',
'mgcDiv' : 'mgc_div',
'glampInnerFclty' : 'glampinner_fclty',
'caravInnerFclty' : 'caravinner_fclty',
'sigungucode' : 'sigungu_code',
'exprnProgrm' : 'exprnprogrm',
})
camp['place_num'] = 0
festival =festival.rename(columns={'addr1' : 'addr',
'contentid' : 'content_id',
'createdtime' : 'created_date',
'eventenddate' : 'event_end_date',
'eventstartdate' : 'event_start_date',
'firstimage' : 'first_image',
'firstimage2' : 'second_image',
'mapx' : 'lat',
'mapy' : 'lng',
'modifiedtime' : 'modified_date',
'sigungucode' : 'sigungu_code',
'title' : 'place_name'})
festival['place_num'] = 1
tour = tour.rename(columns={'addr1' : 'addr',
'contentid' : 'content_id',
'firstimage' : 'first_image',
'firstimage2' : 'second_image',
'mapx' : 'lat',
'mapy' : 'lng',
'sigungucode' : 'sigungu_code',
'title' : 'place_name'})
tour['place_num'] = 2
# Preprocess the crawled campsite data
camp_details = self.camp_details.rename(columns={'view' : 'readcount'})
camp_details['readcount'] = camp_details['readcount'].str.split(' ').str[1]
datas = camp_details['link']
data = [re.findall(r"\d+", data)[0] for data in datas]
camp_details['url_num'] = data
camp_details['url_num'] = camp_details['url_num'].astype('int')
# Merge crawled campsite data with the API data
final_data = pd.merge(camp, camp_details, how='right', left_on='content_id', right_on='url_num')
return festival, tour, final_data
# Concatenate campsite, festival, and tour data
def concat_table(self, final_data, festival, tour):
dataset = final_data.drop(['title', 'address'], 1)
camp_festival = | pd.concat([dataset, festival], 0) | pandas.concat |
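# --- Editor's illustrative sketch (not part of the original source) ---
# The camping pipeline above joins API rows to crawled rows with how='right',
# meaning every crawled row (the right side) is kept even without an API match.
# The toy frames below are invented to make those join semantics concrete.
import pandas as pd

def _demo_right_merge():
    api = pd.DataFrame({"content_id": [1, 2], "place_name": ["Camp A", "Camp B"]})
    crawled = pd.DataFrame({"url_num": [2, 3], "readcount": [10, 20]})
    merged = pd.merge(api, crawled, how="right", left_on="content_id", right_on="url_num")
    # content_id and place_name are NaN for url_num=3: only the right side has that key.
    return merged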
#!/bin/python
# <NAME>
# last updated: 06 December 2021
# version 1.1.0
import os
import argparse
import pandas as pd
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
desc = 'Clean your file up and add taxonomy information'
epi = """DESCRIPTION:
Input: merged .CSV file, taxonomy file
Output: final .CSV file
"""
parser = argparse.ArgumentParser(description=desc, epilog=epi,
formatter_class=CustomFormatter)
parser.add_argument('csv_file', metavar='csv_file', type=str, help='Input csv file')
parser.add_argument('out_prefix', metavar='out_prefix', type=str, help='output file prefix')
def move_column_inplace(df, col, pos):
col = df.pop(col)
df.insert(pos, col.name, col)
return df
def process_csv(file_name, out_prefix):
with open(file_name, 'r') as csv_file, open('taxonomy_file.csv') as tax_list:
csv_df = pd.read_csv(csv_file, sep=",")
# remove in-text lines with header information
csv_df = csv_df[~csv_df['organism'].isin(['organism'])]
# convert all columns (except for organism names into numeric dtypes)
cols = csv_df.columns.drop('organism')
csv_df[cols] = csv_df[cols].apply(pd.to_numeric, errors='coerce')
id_values_0 = csv_df["organism"]
id_values_1 = []
for items in id_values_0:
if items.startswith('NZ'):
id_values_1.append(items[14:])
elif items.startswith('NC'):
id_values_1.append(items[12:])
elif items.startswith('CP'):
id_values_1.append(items[11:])
elif items.startswith('CR'):
id_values_1.append(items[11:])
elif items.startswith('AE'):
id_values_1.append(items[11:])
elif items.startswith('AP'):
id_values_1.append(items[11:])
elif items.startswith('BK'):
id_values_1.append(items[20:])
elif items.startswith('HG'):
id_values_1.append(items[11:])
elif items.startswith('FM'):
id_values_1.append(items[11:])
elif items.startswith('CF'):
id_values_1.append(items[11:])
elif items.startswith('Ca22chr1A_C_albicans_SC5314___organism_'):
id_values_1.append(items[39:])
elif items.startswith('CH476594___organism_'):
id_values_1.append(items[20:])
elif items.startswith('Chr1_A_fumigatus_Af293___organism_'):
id_values_1.append(items[34:])
elif items.startswith('ChrI_A_nidulans_FGSC_A4___organism_'):
id_values_1.append(items[35:])
else:
id_values_1.append(items)
id_values_2 = []
for p in id_values_1:
if p.startswith("_"):
new_string = p.split("_")[1] + "," + p.split("_")[3]
id_values_2.append(new_string)
elif "organism" in p:
new_string_4 = p.split("organism_")[1]
new_string_5 = new_string_4.split("_")[:2]
id_values_2.append(new_string_5)
else:
new_string = p.split("_")
if new_string[0] == "1":
new_string_8 = new_string[3] + "," + new_string[4]
id_values_2.append(new_string_8)
else:
new_string_10 = new_string[:2]
new_string_11 = str(new_string_10)[1:-1]
new_string_12 = new_string_11.replace(" ", "")
id_values_2.append(new_string_12)
id_values_3 = []
for i in id_values_2:
i2 = ''.join(i)
i3 = i2.replace("'", "")
id_values_3.append(i3)
csv_df = csv_df.iloc[:, 1:]
csv_df.insert(0, 'Species_merged', id_values_3)
csv_df.groupby(by=csv_df.columns, axis=1).sum()
# remove all rows that sum to zero
cols_to_sum = csv_df.columns[: csv_df.shape[1] - 1]
csv_df['sum_all'] = csv_df[cols_to_sum].sum(axis=1)
csv_df = csv_df[csv_df.sum_all != 0]
csv_df = csv_df.drop('sum_all', 1)
csv_df = csv_df.groupby(['Species_merged']).sum()
csv_df = csv_df.round(5)
# grep taxonomy file
tax_df = pd.read_csv(tax_list, sep=";")
tax_df = tax_df.astype(str)
tax_df['Species_merged'] = tax_df[['Genus', 'Species']].apply(lambda x: ','.join(x), axis=1)
tax_df = tax_df.drop('Species', 1)
tax_df.drop(tax_df.index[tax_df['Family'] == "Human"], inplace=True)
tax_df.drop(tax_df.index[tax_df['Species_merged'] == "nan,nan"], inplace=True)
# subset taxonomy file based on samples in csv_df
tax_df_sub_0 = | pd.merge(tax_df, csv_df, on=['Species_merged'], how='right') | pandas.merge |
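# --- Editor's illustrative sketch (not part of the original source) ---
# Core of the cleanup in process_csv above: drop rows whose numeric columns sum
# to zero, then collapse duplicate species names by summing. The toy frame is
# invented for illustration.
import pandas as pd

def _demo_zero_row_cleanup():
    df = pd.DataFrame({
        "Species_merged": ["Escherichia,coli", "Escherichia,coli", "Homo,sapiens"],
        "s1": [1.0, 2.0, 0.0],
        "s2": [0.5, 0.0, 0.0],
    })
    cols_to_sum = df.columns.drop("Species_merged")
    df = df[df[cols_to_sum].sum(axis=1) != 0]         # drops the all-zero row
    df = df.groupby("Species_merged").sum().round(5)  # merges duplicate names
    return df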
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
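# --- Editor's illustrative sketch (not part of the original source) ---
# The tests below exercise HDFStore through small round-trips; this is the bare
# pattern they rely on. The temporary-file handling here is a simplification of
# the ensure_clean_path/ensure_clean_store helpers imported above, and the file
# name is arbitrary.
def _demo_hdf_roundtrip(path="__demo_roundtrip__.h5"):
    df = DataFrame({"A": range(5), "B": range(5)})
    try:
        df.to_hdf(path, "df", format="table")           # write in table format
        result = read_hdf(path, "df", where="index>2")  # query a subset back
        tm.assert_frame_equal(result, df[df.index > 2])
    finally:
        if os.path.exists(path):
            os.remove(path)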
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
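# only the first append passed data_columns=["B"]; the second append reuses
# the existing table layout, so B remains queryable as a data column below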
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
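# both spellings select the same rows: the bare string '-3D' is coerced to a
# Timedelta by the query parser, just like pd.Timedelta('-3D') above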
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failure on some Windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
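# index=False avoids creating a PyTables index here, which appears to
# sidestep the NaN-in-0th-row selection bug referenced below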
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# a float column with NaN not in the first position is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
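# a list-like on the right-hand side of '=' behaves like an isin filter,
# so "B=selector" matches any row whose B value is contained in selector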
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
"""This module contains PlainFrame and PlainColumn tests.
"""
import collections
import datetime
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal as np_assert_equal
from pywrangler.util.testing.plainframe import (
NULL,
ConverterFromPandas,
NaN,
PlainColumn,
PlainFrame
)
@pytest.fixture
def plainframe_standard():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, 2, False, "string2", "2019-02-01 10:00:00"]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def plainframe_missings():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, NaN, False, "string2", "2019-02-01 10:00:00"],
[NULL, NULL, NULL, NULL, NULL]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def df_from_pandas():
df = pd.DataFrame(
{"int": [1, 2],
"int_na": [1, np.NaN],
"bool": [True, False],
"bool_na": [True, np.NaN],
"float": [1.2, 1.3],
"float_na": [1.2, np.NaN],
"str": ["foo", "bar"],
"str_na": ["foo", np.NaN],
"datetime": [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")],
"datetime_na": [pd.Timestamp("2019-01-01"), pd.NaT]})
return df
@pytest.fixture
def df_from_spark(spark):
from pyspark.sql import types
values = collections.OrderedDict(
{"int": [1, 2, None],
"smallint": [1, 2, None],
"bigint": [1, 2, None],
"bool": [True, False, None],
"single": [1.0, NaN, None],
"double": [1.0, NaN, None],
"str": ["foo", "bar", None],
"datetime": [datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
None],
"date": [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
None],
"map": [{"foo": "bar"}, {"bar": "foo"}, None],
"array": [[1, 2, 3], [3, 4, 5], None]}
)
data = list(zip(*values.values()))
c = types.StructField
columns = [c("int", types.IntegerType()),
c("smallint", types.ShortType()),
c("bigint", types.LongType()),
c("bool", types.BooleanType()),
c("single", types.FloatType()),
c("double", types.DoubleType()),
c("str", types.StringType()),
c("datetime", types.TimestampType()),
c("date", types.DateType()),
c("map", types.MapType(types.StringType(), types.StringType())),
c("array", types.ArrayType(types.IntegerType()))]
schema = types.StructType(columns)
return spark.createDataFrame(data, schema=schema)
def create_plain_frame(cols, rows, reverse_cols=False, reverse_rows=False):
"""Helper function to automatically create instances of PlainFrame.
`cols` contains typed column annotations like "col1:int".
"""
if reverse_cols:
cols = cols[::-1]
columns, dtypes = zip(*[col.split(":") for col in cols])
values = list(range(1, rows + 1))
mapping = {"str": list(map(str, values)),
"int": values,
"float": list(map(float, values)),
"bool": list([x % 2 == 0 for x in values]),
"datetime": ["2019-01-{:02} 10:00:00".format(x) for x in
values]}
data = [mapping[dtype] for dtype in dtypes]
data = list(zip(*data))
if reverse_rows:
data = data[::-1]
return PlainFrame.from_plain(data=data,
dtypes=dtypes,
columns=columns)
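# illustrative sketch (values derived from the mapping above):
# create_plain_frame(["a:int", "b:str"], 2) yields a PlainFrame with
# columns ("a", "b"), dtypes ("int", "str") and rows (1, "1"), (2, "2")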
def create_plainframe_single(values, dtype):
"""Create some special scenarios more easily. Always assumes a single
column with the same name ("name"). Only the values and dtype vary.
"""
data = [[x] for x in values]
dtypes = [dtype]
columns = ["name"]
return PlainFrame.from_plain(data=data, dtypes=dtypes, columns=columns)
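# illustrative sketch: create_plainframe_single([1, 2], "int") builds a
# single-column PlainFrame named "name" with values (1, 2) and dtype "int"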
def test_plainframe():
# incorrect instantiation with non-tuple / non-PlainColumn arguments when not using the factory method
plain_column = PlainColumn.from_plain(name="int",
dtype="int",
values=[1, 2, 3])
# correct instantiation
PlainFrame(plaincolumns=(plain_column,))
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[plain_column])
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[1])
def test_plainframe_from_plain_pandas_empty():
# tests GH#29
df = PlainFrame.from_plain(data=[], columns=["col1:int", "col2:str"])
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "str"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
dfp = pd.DataFrame(columns=["col1", "col2"], dtype=int)
df = PlainFrame.from_pandas(dfp)
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "int"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
def test_plainframe_attributes(plainframe_missings):
df = plainframe_missings
col_values = lambda x: df.get_column(x).values
assert df.columns == ["int", "float", "bool", "str", "datetime"]
assert df.dtypes == ["int", "float", "bool", "str", "datetime"]
assert col_values("int") == (1, 2, NULL)
assert col_values("str") == ("string", "string2", NULL)
assert col_values("datetime")[0] == datetime.datetime(2019, 1, 1, 10)
def test_plainframe_modify():
# change single value
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([1, 1], "int")
assert df_origin.modify({"name": {1: 1}}) == df_target
# change multiple values
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([3, 3], "int")
assert df_origin.modify({"name": {0: 3, 1: 3}}) == df_target
# change multiple columns
df_origin = PlainFrame.from_plain(data=[[1, 2], ["a", "b"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
df_target = PlainFrame.from_plain(data=[[1, 1], ["a", "a"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
assert df_origin.modify({"int": {1: 1}, "str": {1: "a"}}) == df_target
def test_plainframe_modify_assertions():
# check incorrect type conversion
df = create_plainframe_single([1, 2], "int")
with pytest.raises(TypeError):
df.modify({"name": {0: "asd"}})
def test_plainframe_getitem_subset():
df = create_plain_frame(["col1:str", "col2:int", "col3:int"], 2)
df_sub = create_plain_frame(["col1:str", "col2:int"], 2)
cmp_kwargs = dict(assert_column_order=True,
assert_row_order=True)
# test list of strings, slice and string
df["col1", "col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1":"col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1"].assert_equal(df_sub["col1"], **cmp_kwargs)
# test incorrect type
with pytest.raises(ValueError):
df[{"col1"}]
# test invalid column name
with pytest.raises(ValueError):
df["non_existant"]
def test_plainframe_get_column():
df = create_plain_frame(["col1:str", "col2:int"], 2)
assert df.get_column("col1") is df.plaincolumns[0]
# check value error for non existent column
with pytest.raises(ValueError):
df.get_column("does_not_exist")
def test_plainframe_parse_typed_columns():
parse = PlainFrame._parse_typed_columns
# invalid splits
cols = ["col1:int", "col2"]
with pytest.raises(ValueError):
parse(cols)
# invalid types
cols = ["col1:asd"]
with pytest.raises(ValueError):
parse(cols)
# invalid abbreviations
cols = ["col1:a"]
with pytest.raises(ValueError):
parse(cols)
# correct types and columns
cols = ["col1:str", "col2:s",
"col3:int", "col4:i",
"col5:float", "col6:f",
"col7:bool", "col8:b",
"col9:datetime", "col10:d"]
names = ["col{}".format(x) for x in range(1, 11)]
dtypes = ["str", "str",
"int", "int",
"float", "float",
"bool", "bool",
"datetime", "datetime"]
result = (names, dtypes)
np_assert_equal(parse(cols), result)
def test_plainframe_from_plain():
# unequal elements per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1]],
columns=["a", "b"],
dtypes=["int", "int"])
# mismatch between number of columns and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a"],
dtypes=["int", "int"])
# mismatch between number of dtypes and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int"])
# incorrect dtypes
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bad_type"])
# type errors conversion
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bool"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["float", "int"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["str", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[True, 2],
[False, 2]],
columns=["a", "b"],
dtypes=["datetime", "int"])
# correct implementation should not raise
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "int"])
def test_plainframe_to_plain():
columns = dtypes = ["int", "float", "bool", "str"]
data = [[1, 1.1, True, "string"],
[2, 2, False, "string2"]]
pf = PlainFrame.from_plain(data=data, columns=columns, dtypes=dtypes)
expected = (data, columns, dtypes)
assert pf.to_plain() == expected
def test_plainframe_from_dict():
data = collections.OrderedDict(
[("col1:int", [1, 2, 3]),
("col2:s", ["a", "b", "c"])]
)
df = PlainFrame.from_dict(data)
# check correct column order and dtypes
np_assert_equal(df.columns, ("col1", "col2"))
np_assert_equal(df.dtypes, ["int", "str"])
# check correct values
np_assert_equal(df.get_column("col1").values, (1, 2, 3))
np_assert_equal(df.get_column("col2").values, ("a", "b", "c"))
def test_plainframe_to_dict():
df = create_plain_frame(["col2:str", "col1:int"], 2)
to_dict = df.to_dict()
keys = list(to_dict.keys())
values = list(to_dict.values())
# check column order and dtypes
np_assert_equal(keys, ["col2:str", "col1:int"])
# check values
np_assert_equal(values[0], ["1", "2"])
np_assert_equal(values[1], [1, 2])
def test_plainframe_from_pandas(df_from_pandas):
df = df_from_pandas
df_conv = PlainFrame.from_pandas(df)
# check int to int
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, 2)
# check bool to bool
assert df_conv.get_column("bool").dtype == "bool"
assert df_conv.get_column("bool").values == (True, False)
# check bool (object) to bool with nan
assert df_conv.get_column("bool_na").dtype == "bool"
assert df_conv.get_column("bool_na").values == (True, NULL)
# check float to float
assert df_conv.get_column("float").dtype == "float"
assert df_conv.get_column("float").values == (1.2, 1.3)
# check float to float with nan
assert df_conv.get_column("float_na").dtype == "float"
np_assert_equal(df_conv.get_column("float_na").values, (1.2, NaN))
# check str to str
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar")
# check str to str with nan
assert df_conv.get_column("str_na").dtype == "str"
assert df_conv.get_column("str_na").values == ("foo", NULL)
# check datetime to datetime
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# check datetime to datetime with nan
assert df_conv.get_column("datetime_na").dtype == "datetime"
assert df_conv.get_column("datetime_na").values == (
datetime.datetime(2019, 1, 1), NULL)
def test_plainframe_from_pandas_assertions_missings_cast():
    # check that mixed dtypes raise
df = pd.DataFrame({"mixed": [1, "foo bar"]})
with pytest.raises(TypeError):
PlainFrame.from_pandas(df)
    # check assertions for incorrectly forced dtypes
# too many types provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["int", "str"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "str",
"dummy": "int"})
# invalid dtypes provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["not existant type"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "not existant type"})
# invalid column names provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"dummy": "str"})
# check int to forced int with nan
df = pd.DataFrame({"int": [1, np.NaN]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, NULL)
# check force int to float
df = pd.DataFrame({"int": [1, 2]})
df_conv = PlainFrame.from_pandas(df, dtypes=["float"])
assert df_conv.get_column("int").dtype == "float"
assert df_conv.get_column("int").values == (1.0, 2.0)
# check force float to int
df = pd.DataFrame({"float": [1.0, 2.0]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("float").dtype == "int"
assert df_conv.get_column("float").values == (1, 2)
# check force str to datetime
df = pd.DataFrame({"datetime": ["2019-01-01", "2019-01-02"]})
df_conv = PlainFrame.from_pandas(df, dtypes=["datetime"])
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# dtype object with strings and nan should pass correctly
df = pd.DataFrame({"str": ["foo", "bar", NaN]}, dtype=object)
df_conv = PlainFrame.from_pandas(df)
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar", NULL)
def test_plainframe_from_pandas_inspect_dtype():
inspect = ConverterFromPandas.inspect_dtype
# raise if incorrect type
ser = pd.Series("asd", dtype=object)
with pytest.raises(TypeError):
inspect(ser)
def test_plainframe_from_pandas_inspect_dtype_object():
inspect = ConverterFromPandas.inspect_dtype_object
# ensure string with missings
df = pd.DataFrame({"dummy": ["asd", NaN]})
conv = ConverterFromPandas(df)
assert conv.inspect_dtype_object("dummy") == "str"
# check incorrect dtype
df = pd.DataFrame({"dummy": ["asd", tuple([1, 2])]})
conv = ConverterFromPandas(df)
with pytest.raises(TypeError):
conv.inspect_dtype_object("dummy")
def test_plainframe_to_pandas(plainframe_standard):
from pandas.api import types
df = plainframe_standard.to_pandas()
assert types.is_integer_dtype(df["int"])
assert df["int"][0] == 1
assert df["int"].isnull().sum() == 0
assert types.is_float_dtype(df["float"])
assert df["float"].isnull().sum() == 0
assert df["float"][1] == 2.0
assert types.is_bool_dtype(df["bool"])
np_assert_equal(df["bool"][0], True)
assert df["bool"].isnull().sum() == 0
assert types.is_object_dtype(df["str"])
assert df["str"].isnull().sum() == 0
assert df["str"][0] == "string"
assert types.is_datetime64_dtype(df["datetime"])
assert df["datetime"].isnull().sum() == 0
assert df["datetime"][0] == pd.Timestamp("2019-01-01 10:00:00")
def test_plainframe_to_pandas_missings(plainframe_missings):
from pandas.api import types
df = plainframe_missings.to_pandas()
assert types.is_float_dtype(df["int"])
assert df["int"][0] == 1.0
assert pd.isnull(df["int"][2])
assert df["int"].isnull().sum() == 1
assert df["float"].isnull().sum() == 2
assert df["float"][0] == 1.1
assert pd.isnull(df["float"][2])
assert types.is_float_dtype(df["bool"])
assert df["bool"][0] == 1.0
assert df["bool"].isnull().sum() == 1
assert | types.is_object_dtype(df["str"]) | pandas.api.types.is_object_dtype |
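# Illustrative sketch only (not part of the test module above): PlainFrame is
# the pywrangler testing helper exercised by these tests; the import path
# below is an assumption.
#
#   from pywrangler.util.testing.plainframe import PlainFrame
#   pf = PlainFrame.from_plain(data=[[1, "a"], [2, "b"]],
#                              columns=["id", "name"],
#                              dtypes=["int", "str"])
#   pf.to_pandas()   # pandas DataFrame with an int64 and an object column
#   pf.to_plain()    # ([[1, "a"], [2, "b"]], ["id", "name"], ["int", "str"])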
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.setting import setting
from src.cpePaser import day_extract
from src.cpePaser import week_extract
import pandas as pd
from src.timeOperator import timeOpt
import numpy as np
import math
import os
from src.postEva.exper_paser import exper_paser
from src.postEva.capacity_paser import capacity_paser
from src.postEva.capacity_paser import cell
from src.logger_setting.my_logger import get_logger
logger = get_logger()
engineer_map = {}
threshold_map = {}
def get_post_df():
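    # Collect the day-level CSV extracts for the third month after the
    # earliest month on disk and stack them into a single frame holding only
    # the columns needed for post-evaluation.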
min_month = week_extract.get_min_month()
all_file = week_extract.get_file_by_range(timeOpt.add_months(min_month, 2), timeOpt.add_months(min_month, 3))
df = pd.DataFrame(columns=setting.parameter_json["post_eva_from_day_column_name"])
for f in all_file:
file_df = pd.read_csv(f, error_bad_lines=False, index_col=False)[setting.parameter_json[
"post_eva_from_day_column_name"]]
df = df.append(file_df)
return df
def post_evaluation():
df = get_post_df()
grouped = day_extract.groupby_calc(df).apply(calc_post_eva)
result = pd.DataFrame(grouped)
result.to_csv(os.path.join(setting.post_eva_path, 'post_eva_data.csv'), index=False)
def calc_post_eva(df):
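    # Runs once per ESN group: sums the traffic counters (scaled by
    # setting.mb, presumably bytes -> MB) and keeps the most recent
    # IMSI/IMEI/MSISDN observed for the device.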
esn = df['esn'].values
total_download = np.sum(df['TotalDownload'].values) / setting.mb
total_upload = np.sum(df['TotalUpload'].values) / setting.mb
df_sort_by_date = df.sort_values('date')[['IMSI', 'IMEI', 'MSISDN']]
newst_imsi = df_sort_by_date['IMSI'].values[-1]
newst_imei = df_sort_by_date['IMEI'].values[-1]
newst_msisdn = df_sort_by_date['MSISDN'].values[-1]
data = {'esn': esn[0],
'TotalDownload': [total_download],
'TotalUpload': [total_upload],
'IMSI': [newst_imsi],
'IMEI': [newst_imei],
'MSISDN': [newst_msisdn]}
result = | pd.DataFrame(data) | pandas.DataFrame |
# import cassandra requires pip3 install cassandra-driver
from cassandra.cluster import Cluster
import pandas as pd
def pandas_factory(colnames, rows):
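    # cassandra-driver calls the row factory with the column names and the raw
    # row tuples of each result page; returning a DataFrame lets callers work
    # with pandas instead of the driver's named tuples.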
return pd.DataFrame(rows, columns=colnames)
class CassandraWrapper:
def __init__(self):
self._client = Cluster(['localhost'], port=9042)
print('Cassandra initiation: OK')
def query(self, ids, orderBy, sortRule, sentiment, sentimentType, polarity, polarityValue):
###################################################################################
# Initiation
###################################################################################
session = self._client.connect(keyspace='test')
#----------------------------------------------------------------------------------
# Build where conditions
#----------------------------------------------------------------------------------
# Defines format of the result
session.row_factory = pandas_factory
session.default_fetch_size = None
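        # A fetch size of None disables the driver's automatic paging, so a
        # single execute() returns all matching rows (and hence one DataFrame).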
columns = '\
app_id, \
author, \
content, \
developer_reply, \
developer_reply_post_date, \
helpful_count, \
post_date, \
rating, \
sentiment_type, \
sentiment_polarity, \
sentiment_subjectivity'
connector = ''
where = ''
if sentiment:
where = 'sentiment_type IN (\'' + sentimentType + '\')'
connector = ' AND '
if polarity:
if polarity < 0:
where = where + connector + 'sentiment_polarity < ' + str(polarityValue)
else:
where = where + connector + 'sentiment_polarity > ' + str(polarityValue)
connector = ' AND '
if not polarity and orderBy:
where = where + connector + 'sentiment_polarity >= -1 AND sentiment_polarity <= 1'
connector = ' AND '
#----------------------------------------------------------------------------------
# Do the query
#----------------------------------------------------------------------------------
# Performs the query
table = | pd.DataFrame() | pandas.DataFrame |
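# --- illustrative, self-contained check (not part of the original module) ---
# pandas_factory above can be exercised without a running Cassandra cluster by
# faking the (colnames, rows) pair the driver would normally hand it; the
# sample values below are made up.
if __name__ == "__main__":
    _cols = ["app_id", "rating", "sentiment_polarity"]
    _rows = [("com.example.app", 4, 0.31), ("com.example.app", 2, -0.60)]
    _df = pandas_factory(_cols, _rows)
    print(_df.shape)   # (2, 3)
    print(_df.dtypes)  # rating and sentiment_polarity are inferred as numeric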
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
import bs4 as bs
import urllib.request
import datetime
import os
today=datetime.date.today()
display_list = []
display_list1 = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
urls=[]
model=[]
company=[]
specs=[]
detail2=[]
aa=[]
country=[]
memory_list=[]
url="https://www.sonymobile.com/in/products/phones/"
r=requests.get(url)
soup=BeautifulSoup(r.text,'html.parser')
links=soup.find_all('a',attrs={'class':'product-name p2'})
for s in links:
tt=s.find_all("strong")
for y in tt:
model.append(y.text)
links1=soup.find_all('a',attrs={'class':'product-name p2'})
for t in links:
urls.append(t['href'])
#print(urls)
for i in urls:
data1=[]
z=i
r=requests.get(z)
soup=BeautifulSoup(r.text,'html.parser')
data=soup.find_all('p',attrs={'class':'copy ghost-center with-icon'})
for s in data:
tt=s.find_all("span")
for p in tt:
data1.append(p.text)
#print(data1)
st=''
for l in range(len(data1)):
st=st+data1[l]
specs.append(st)
for i in range(len(specs)):
company.append("SONY")
#detailed-----
for x in urls:
aa.append(x+"specifications/")
for k in aa:
d=k
print(d)
heads = []
dets = []
r=requests.get(d)
soup=BeautifulSoup(r.text,'html.parser')
dat=soup.find_all('div', attrs={'class':'grid no-grid-at-567 spec-section'})
for ta in dat:
tt=ta.find_all('h6', attrs={'class':'t6-light section-label'})
d=ta.find_all('div', attrs={'class':'span4'})
for t in tt:
heads.append(t.text)
st=''
for yy in d:
st=st+(yy.text.strip())
dets.append(st)
#print(heads)
#print(dets)
for i in range(len(heads)):
#print(heads[i])
if 'Memory and storage' in heads[i]:
memory_list.append(dets[i])
op=''
if 'Display' in heads[i]:
match = re.search(r'\s*\d+\s*\.*\,*\s*\d*\s*cm',dets[i])
if match:
op=str(match.group())
if not match:
match=re.search(r'\s*\d+\s*\,*\s*\d*\s*cm',dets[i])
if match:
op=str(match.group())
if not match:
op=" "
display_list.append(op+" HD DISPLAY")
#print("________________________________________")
if 'Processor (CPU)' in heads[i]:
processor_list.append(dets[i])
c=''
if 'Battery' in heads[i]:
match = re.search(r'\s*\d+\s*\,*\s*\d*\s*mAh',dets[i])
if match:
c=str(match.group())
#print(c)
if not match:
c=' '
#print(c)
battery_list.append(c)
        if 'Dimensions' in heads[i]:
            match = re.search(r'x\s*\d*\.\d*\s*mm',dets[i])
            if not match:
                match = re.search(r'x\s*\d*\s*mm',dets[i])
            # guard against spec pages where neither pattern matches so the
            # script does not crash on match.group()
            if match:
                thickness_list.append(match.group())
            if not match:
                thickness_list.append(" ")
st=" "
st1=''
s=''
b=''
d=''
for i in range(len(heads)):
        if 'Main Camera' in heads[i] or 'Main camera' in heads[i]:
            st=st+dets[i]
            match=re.search(r'\d+\s*\.*\s*\d*\s*MP',st)
            if match:
                s=str(match.group())
            if not match:
                s=" "
            d=d+"MAIN CAMERA"+s
        if 'Front Camera' in heads[i] or 'Front camera' in heads[i]:
            st1=st1+dets[i]
            # use the front-camera match object here, not the main-camera one
            mare=re.search(r'\d+\s*\.*\s*\d*\s*MP',st1)
            if mare:
                b=str(mare.group())
            if not mare:
                b=" "
            d=d+" FRONT CAMERA"+b
camera_list.append(d)
for x in aa:
extras_links.append(x)
country.append('JAPAN')
print(len(country))
print(len(company))
print(len(specs))
print(len(camera_list))
print(len(memory_list))
print(len(battery_list))
print(len(thickness_list))
print(len(processor_list))
print(len(extras_links))
records=[]
for i in range(len(aa)):
records.append((country[i], company[i], model[i], specs[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
path='C:\\LavaWebScraper\\BrandWiseFiles\\'
df = | pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS']) | pandas.DataFrame |
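# Illustrative only: the records frame built above would typically be written
# out under the BrandWiseFiles path; the file name below is an assumption, not
# taken from the original script.
#
#   df.to_csv(os.path.join(path, 'SONY.csv'), index=False)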
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
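            # exercise the constructor, the context manager and the
            # to_hdf/read_hdf round-trip under the requested file mode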
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
        # Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
        # Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
            # (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
                # (tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
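                # round-trip a small frame whose index comes from the given
                # index factory, using the requested storage format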
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
            # nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
            # smoke test: make sure the file is properly closed after a read
            # that raises KeyError, before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
        # test a different ordering but with more fields (like an invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
                ).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them thought)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
| Term() | pandas.io.pytables.Term |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 08 13:41:45 2018
@author: behzad
"""
import numpy as np
import pandas as pd
A1=np.array([2,5.2,1.8,5])
S1 = pd.Series([2,5.2,1.8,5],["a","b","c","d"])
S2 = | pd.Series([2,5.2,1.8,5],index= ["a","b","c","d"]) | pandas.Series |
"""
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import BytesIO
import os
import tempfile
import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
def test_bytes_io_input(all_parsers):
encoding = "cp1255"
parser = all_parsers
data = BytesIO("שלום:1234\n562:123".encode(encoding))
result = parser.read_csv(data, sep=":", encoding=encoding)
expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_read_csv_unicode(all_parsers):
parser = all_parsers
data = BytesIO("\u0141aski, Jan;1".encode("utf-8"))
result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
expected = DataFrame([["\u0141aski, Jan", 1]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sep", [",", "\t"])
@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
def test_utf16_bom_skiprows(all_parsers, sep, encoding):
# see gh-2298
parser = all_parsers
data = """skip this
skip this too
A,B,C
1,2,3
4,5,6""".replace(
",", sep
)
path = f"__{tm.rands(10)}__.csv"
kwargs = dict(sep=sep, skiprows=2)
utf8 = "utf-8"
with tm.ensure_clean(path) as path:
from io import TextIOWrapper
bytes_data = data.encode(encoding)
with open(path, "wb") as f:
f.write(bytes_data)
bytes_buffer = BytesIO(data.encode(utf8))
bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8)
result = parser.read_csv(path, encoding=encoding, **kwargs)
expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
bytes_buffer.close()
tm.assert_frame_equal(result, expected)
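# Minimal sketch (an assumption, not part of the original test suite): reading a
# UTF-16 file written with a BOM only needs the matching ``encoding`` plus
# ``skiprows`` for any preamble lines; pandas handles the BOM itself. The helper
# name ``_demo_read_utf16_with_preamble`` is hypothetical.
def _demo_read_utf16_with_preamble(tmp_path):
    import pandas as pd  # local import; this module only imports DataFrame

    target = tmp_path / "utf16_demo.csv"
    target.write_bytes("skip this\nA,B\n1,2\n".encode("utf-16"))
    # One preamble line is skipped; the rest parses as a 1x2 frame.
    return pd.read_csv(target, encoding="utf-16", skiprows=1)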
def test_utf16_example(all_parsers, csv_dir_path):
path = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
result = parser.read_csv(path, encoding="utf-16", sep="\t")
assert len(result) == 50
def test_unicode_encoding(all_parsers, csv_dir_path):
path = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
result = parser.read_csv(path, header=None, encoding="latin-1")
result = result.set_index(0)
got = result[1][1632]
expected = "\xc1 k\xf6ldum klaka (<NAME>) (1994)"
assert got == expected
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# Basic test
("a\n1", dict(), DataFrame({"a": [1]})),
# "Regular" quoting
('"a"\n1', dict(quotechar='"'), DataFrame({"a": [1]})),
# Test in a data row instead of header
("b\n1", dict(names=["a"]), DataFrame({"a": ["b", "1"]})),
# Test in empty data row with skipping
("\n1", dict(names=["a"], skip_blank_lines=True), DataFrame({"a": [1]})),
# Test in empty data row without skipping
(
"\n1",
dict(names=["a"], skip_blank_lines=False),
| DataFrame({"a": [np.nan, 1]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 19:18:20 2019
@author: <NAME>
"""
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras import optimizers
from keras.models import Model
from keras.callbacks import History
from keras.applications import vgg16, xception, resnet, inception_v3, inception_resnet_v2, nasnet
import pandas as pd
import os
from datetime import datetime as dt
from keras_preprocessing.image import ImageDataGenerator
from keras.layers import Dense, GlobalAveragePooling2D
from evaluation import EvalUtils
from keras.utils import plot_model
from contextlib import redirect_stdout
from time import time
from keras.callbacks.tensorboard_v1 import TensorBoard
from utils import Utility
#FORCE GPU USE
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#!wget https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 --no-check-certificate
#!wget https://github.com/keras-team/keras-applications/releases/download/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 --no-check-certificate
#!wget https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5 --no-check-certificate
#!wget https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5 --no-check-certificate
#!wget https://github.com/fchollet/deep-learning-models/releases/download/v0.7/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5 --no-check-certificate
#!wget https://github.com/titu1994/Keras-NASNet/releases/download/v1.2/NASNet-large-no-top.h5 --no-check-certificate
class TrainingUtils:
def __init__(self, input_params, path_dict):
self.input_params = input_params
self.path_dict = path_dict
def plot_layer_arch(self, model, stage_no):
"""
Get the model architecture, so that you can make
        the decision up to which layers you can freeze.
        The particular layer name can be inferred from
        this plot.
Arguments:
-model : The trained model
-model_name : Name of the model, for example - vgg16, inception_v3, resnet50 etc
-stage_no : The stage of training. This pipeline is trained in two stages 1 and 2.
The stage number is needed to save the architecture for individual stages
and have unique file names
"""
plot_model(model,
to_file=self.path_dict['model_path']+"stage{}/".format(stage_no)+'{}_model_architecture_stage_{}.pdf'.format(self.input_params["model_name"],stage_no),
show_shapes=True,
show_layer_names=True)
def save_summary(self, model, stage_no):
"""
This function is used to save the model summary along with
the number of parameters at each stage.
Arguments:
-model : The trained model
-model_name : Name of the model, for example - vgg16, inception_v3, resnet50 etc
-stage_no : The stage of training. This pipeline is trained in two stages 1 and 2.
The stage number is needed to save the architecture for individual stages
and have unique file names
"""
with open(self.path_dict['model_path']+"stage{}/".format(stage_no)+"{}_model_summary_stage_{}.txt".format(self.input_params["model_name"], stage_no), "w") as f:
with redirect_stdout(f):
model.summary()
def save_params(self):
"""
This block of code is used to save the hyperparameter
values into a csv file in the evaluation folder.
Arguments:
-input_params : This parameter will contain all the information that the user will
input through the terminal
"""
with open(self.path_dict['sim_path'] + '/hyperparameters.csv', 'w') as f:
f.write("%s,%s\n"%("hyperparameter","value\n"))
for key in self.input_params.keys():
f.write("%s,%s\n"%(key,self.input_params[key]))
def callbacks_list(self, stage_no):
"""
This function is used to define custom callbacks. Any new callbacks
that are to be added to the model must be defined in this function
and returned as a list of callbacks.
Arguments:
-input_params : This parameter will contain all the information that the user will
input through the terminal
-stage_no : The stage of training. This pipeline is trained in two stages 1 and 2.
The stage number is needed to save the architecture for individual stages
and have unique file names
"""
filepath = self.path_dict['model_path']+"stage{}/".format(stage_no)+"{}_weights_stage_{}.hdf5".format(self.input_params['model_name'], stage_no)
checkpoint = ModelCheckpoint(filepath,
monitor=self.input_params['monitor'],
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
reduce_learning_rate = ReduceLROnPlateau(monitor=self.input_params['monitor'],
factor = 0.1,
patience = 3)
early_stop = EarlyStopping(monitor=self.input_params['monitor'],
patience = 5)
history = History()
#Custom callback to monitor both validation accuracy and loss
"""
best_val_acc = 0
best_val_loss = sys.float_info.max
def saveModel(epoch,logs):
val_acc = logs['val_acc']
val_loss = logs['val_loss']
if val_acc > best_val_acc:
best_val_acc = val_acc
model.save(...)
elif val_acc == best_val_acc:
if val_loss < best_val_loss:
best_val_loss=val_loss
model.save(...)
callbacks = [LambdaCallback(on_epoch_end=saveModel)]
"""
        tensorboard = TensorBoard(log_dir=self.path_dict['model_path']+"stage{}/".format(stage_no)+"logs/{}".format(time()),
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch')
#!tensorboard --logdir=/home/developer/Desktop/Saugata/Classification-Pipeline/simulations/SIM_01/models/stage1/logs/
list_ = [checkpoint, reduce_learning_rate, history, early_stop, tensorboard]
return list_
def train_stage1(self):
"""
        In this stage, we freeze every convolution block and train only the
        newly added top layers: a global spatial average pooling layer followed
        by fully connected dense layers placed on top of the base model's
        output. All convolution layers are set to non-trainable, and the model
        must be compiled after they are frozen for the change to take effect.
Arguments:
-input_params : This parameter will contain all the information that the user will
input through the terminal
"""
print("\nTraining the model by freezing the convolution block and tuning the top layers...")
st = dt.now()
utils_obj = Utility(self.input_params, self.path_dict)
        #TODO: if model_name != "custom", load a pre-trained ImageNet backbone as below; otherwise a custom architecture would need to be handled here.
base_model = utils_obj.load_imagenet_model()
#Adding a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
#Adding a fully-connected dense layer
x = Dense(self.input_params['dense_neurons'], activation='relu', kernel_initializer='he_normal')(x)
#Adding a final dense output final layer
n = utils_obj.no_of_classes()
output_layer = Dense(n, activation='softmax', kernel_initializer='glorot_uniform')(x)
#Define the model
model_stg1 = Model(inputs=base_model.input, outputs=output_layer)
#Here we will freeze the convolution base and train only the top layers
#We will set all the convolution layers to false, the model should be
#compiled when all the convolution layers are set to false
for layer in base_model.layers:
layer.trainable = False
#Compiling the model
model_stg1.compile(optimizer=optimizers.Adam(lr=self.input_params['stage1_lr']),
loss='categorical_crossentropy',
metrics=[self.input_params['metric']])
#Normalize the images
train_datagen = ImageDataGenerator(preprocessing_function=utils_obj.init_preprocess_func())
val_datagen = ImageDataGenerator(preprocessing_function=utils_obj.init_preprocess_func())
df_train = utils_obj.load_data("train")
df_val = utils_obj.load_data("val")
train_generator = train_datagen.flow_from_dataframe(dataframe=df_train,
directory=self.path_dict['source'],
target_size=utils_obj.init_sizes(),
x_col="filenames",
y_col="class_label",
batch_size=self.input_params['batch_size'],
class_mode='categorical',
color_mode='rgb',
shuffle=True)
val_generator = val_datagen.flow_from_dataframe(dataframe=df_val,
directory=self.path_dict['source'],
target_size=utils_obj.init_sizes(),
x_col="filenames",
y_col="class_label",
batch_size=self.input_params['batch_size'],
class_mode='categorical',
color_mode='rgb',
shuffle=True)
nb_train_samples = len(train_generator.classes)
nb_val_samples = len(val_generator.classes)
history=model_stg1.fit_generator(generator=train_generator,
steps_per_epoch=nb_train_samples // self.input_params['batch_size'],
epochs=self.input_params['epochs1'],
validation_data=val_generator,
validation_steps=nb_val_samples // self.input_params['batch_size'],
callbacks=TrainingUtils.callbacks_list(self, 1)) #1 for stage 1
hist_df = | pd.DataFrame(history.history) | pandas.DataFrame |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - Economic Indicators - United States
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - US - pending home sales month-over-month rate
def macro_usa_phs():
    """
    US pending home sales month-over-month rate
    http://data.eastmoney.com/cjsj/foreign_0_5.html
    :return: pending home sales month-over-month data
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
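# Hypothetical usage sketch (not part of the original module): macro_usa_phs
# returns a four-column DataFrame whose "前值"/"现值" columns are already numeric.
# The wrapper name _demo_macro_usa_phs is an assumption for illustration only.
def _demo_macro_usa_phs():
    df = macro_usa_phs()
    # Numeric previous/current values can be compared release by release.
    df["change"] = df["现值"] - df["前值"]
    return df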
# Jin10 Data Center - Economic Indicators - US - Economic Conditions - US GDP
def macro_usa_gdp_monthly():
    """
    US Gross Domestic Product (GDP) report, data range from 20080228 to present
https://datacenter.jin10.com/reportType/dc_usa_gdp
:return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
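# Hypothetical usage sketch: the series returned above is float-valued and
# indexed by release date, so standard pandas time-series tooling applies.
# _demo_macro_usa_gdp_monthly is an assumed name, not part of akshare.
def _demo_macro_usa_gdp_monthly():
    gdp = macro_usa_gdp_monthly()
    # Average reported reading per calendar year.
    return gdp.groupby(gdp.index.year).mean()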
# Jin10 Data Center - Economic Indicators - US - Price Levels - US CPI MoM report
def macro_usa_cpi_monthly():
    """
    US CPI month-over-month report, data range from 19700101 to present
    https://datacenter.jin10.com/reportType/dc_usa_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
    :return: US CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
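# The macro_usa_* functions in this module all repeat the same second step: top
# up the static CDN history with recent releases from the datacenter-api
# ``list_v2`` endpoint, keyed by ``attr_id``. A condensed sketch of that shared
# step is given here for reference; _fetch_jin10_recent is a hypothetical helper
# and the reduced header set is an assumption.
def _fetch_jin10_recent(attr_id: str) -> pd.Series:
    t = time.time()
    url = "https://datacenter-api.jin10.com/reports/list_v2"
    params = {"category": "ec", "attr_id": attr_id, "_": str(int(round(t * 1000)))}
    headers = {"x-app-id": "rU6QIu7JHe2gOUeR", "x-version": "1.0.0"}
    r = requests.get(url, params=params, headers=headers)
    recent = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    # First column is the release date, second the headline value.
    recent.index = pd.to_datetime(recent.iloc[:, 0])
    return recent.iloc[:, 1]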
# Jin10 Data Center - Economic Indicators - US - Price Levels - US core CPI MoM report
def macro_usa_core_cpi_monthly():
    """
    US core CPI month-over-month report, data range from 19700101 to present
    https://datacenter.jin10.com/reportType/dc_usa_core_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
    :return: US core CPI MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US personal spending MoM report
def macro_usa_personal_spending():
    """
    US personal spending month-over-month report, data range from 19700101 to present
    https://datacenter.jin10.com/reportType/dc_usa_personal_spending
    https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
    :return: US personal spending MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US retail sales MoM report
def macro_usa_retail_sales():
    """
    US retail sales month-over-month report, data range from 19920301 to present
    https://datacenter.jin10.com/reportType/dc_usa_retail_sales
    https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
    :return: US retail sales MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US import price index report
def macro_usa_import_price():
    """
    US import price index report, data range from 19890201 to present
    https://datacenter.jin10.com/reportType/dc_usa_import_price
    https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
    :return: US import price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Price Levels - US export price index report
def macro_usa_export_price():
    """
    US export price index report, data range from 19890201 to present
    https://datacenter.jin10.com/reportType/dc_usa_export_price
    https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US export price index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
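# Hypothetical combination example: both trade price indices above come back as
# float Series indexed by release date, so they align into one frame.
# _demo_usa_trade_prices is an assumed name for illustration only.
def _demo_usa_trade_prices():
    prices = pd.concat(
        {
            "import_price": macro_usa_import_price(),
            "export_price": macro_usa_export_price(),
        },
        axis=1,
    )
    return prices.dropna(how="all")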
# Jin10 Data Center - Economic Indicators - US - Labor Market - LMCI
def macro_usa_lmci():
    """
    Fed Labor Market Conditions Index (LMCI) report, data range from 20141006 to present
    https://datacenter.jin10.com/reportType/dc_usa_lmci
    https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Fed Labor Market Conditions Index report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Unemployment Rate - US unemployment rate report
def macro_usa_unemployment_rate():
    """
    US unemployment rate report, data range from 19700101 to present
    https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
    https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
    :return: US unemployment rate report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
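# Hypothetical usage sketch: an approximate year-over-year change of the monthly
# unemployment-rate series. Twelve releases back is an assumption about the
# release cadence, and the helper name is illustrative only.
def _demo_unemployment_rate_change():
    rate = macro_usa_unemployment_rate()
    return rate.diff(12).dropna()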
# Jin10 Data Center - Economic Indicators - US - Labor Market - Unemployment Rate - US Challenger job cuts report
def macro_usa_job_cuts():
    """
    US Challenger job cuts report, data range from 19940201 to present
    https://datacenter.jin10.com/reportType/dc_usa_job_cuts
    https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
    :return: US Challenger job cuts report - current value (10,000 persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Employment - US non-farm payrolls report
def macro_usa_non_farm():
    """
    US non-farm payrolls report, data range from 19700102 to present
    https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
    https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
    :return: US non-farm payrolls report - current value (10,000 persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
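# Hypothetical sketch combining two labour-market series defined above into a
# single frame; _demo_usa_labor_snapshot is an assumed name.
def _demo_usa_labor_snapshot():
    frame = pd.concat(
        {
            "nonfarm_payrolls_10k_persons": macro_usa_non_farm(),
            "unemployment_rate_pct": macro_usa_unemployment_rate(),
        },
        axis=1,
    )
    # Keep only dates where both releases are available.
    return frame.dropna()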
# Jin10 Data Center - Economic Indicators - US - Labor Market - Employment - US ADP employment report
def macro_usa_adp_employment():
    """
    US ADP employment report, data range from 20010601 to present
    https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
    https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
    :return: US ADP employment report - current value (10,000 persons)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Consumer Income and Spending - US core PCE price index YoY report
def macro_usa_core_pce_price():
    """
    US core PCE price index year-over-year report, data range from 19700101 to present
    https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
    https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
    :return: US core PCE price index YoY report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Labor Market - Consumer Income and Spending - US real personal consumption expenditures QoQ advance report
def macro_usa_real_consumer_spending():
    """
    US real personal consumption expenditures QoQ advance report, data range from 20131107 to present
    https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
    https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
    :return: US real personal consumption expenditures QoQ advance report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - Economic Indicators - US - Trade - US trade balance report
def macro_usa_trade_balance():
    """
    US trade balance report, data range from 19700101 to present
    https://datacenter.jin10.com/reportType/dc_usa_trade_balance
    https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
    :return: US trade balance report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_trade_balance"
return temp_df
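# --- Illustrative usage sketch (added for clarity; not part of the original Jin10 client code) ---
# Each macro_usa_* helper above returns a date-indexed pandas Series named after its
# indicator, so several indicators can be aligned into one DataFrame. The helper name
# below is hypothetical, and live network access to the Jin10 endpoints is assumed.
def _example_usa_trade_panel() -> pd.DataFrame:
    parts = [macro_usa_trade_balance(), macro_usa_current_account()]
    # outer join on the date index so series with different release dates still line up
    return pd.concat(parts, axis=1).sort_index()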
# Jin10 Data Center - Economic Indicators - USA - Trade - US Current Account Report
def macro_usa_current_account():
"""
US Current Account report, data from 20080317 to the present
https://datacenter.jin10.com/reportType/dc_usa_current_account
https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012
:return: US Current Account report - current value (100 million USD)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国经常账报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "12",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_current_account"
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - Baker Hughes Rig Count Report
def macro_usa_rig_count():
"""
Baker Hughes rig count report, data from 20080317 to the present
https://datacenter.jin10.com/reportType/dc_rig_count_summary
https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203
:return: Baker Hughes rig count report - current-week rig counts and weekly changes
:rtype: pandas.DataFrame
"""
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/baker.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["钻井总数_钻井数"] = temp_df["钻井总数"].apply(lambda x: x[0])
big_df["钻井总数_变化"] = temp_df["钻井总数"].apply(lambda x: x[1])
big_df["美国石油钻井_钻井数"] = temp_df["美国石油钻井"].apply(lambda x: x[0])
big_df["美国石油钻井_变化"] = temp_df["美国石油钻井"].apply(lambda x: x[1])
big_df["混合钻井_钻井数"] = temp_df["混合钻井"].apply(lambda x: x[0])
big_df["混合钻井_变化"] = temp_df["混合钻井"].apply(lambda x: x[1])
big_df["美国天然气钻井_钻井数"] = temp_df["美国天然气钻井"].apply(lambda x: x[0])
big_df["美国天然气钻井_变化"] = temp_df["美国天然气钻井"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
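# --- Illustrative usage sketch (added for clarity; not part of the original Jin10 client code) ---
# Unlike most helpers in this module, macro_usa_rig_count() returns a DataFrame that
# pairs each rig category with its weekly change (e.g. "钻井总数_钻井数" and
# "钻井总数_变化"). A minimal inspection, with hypothetical variable names:
def _example_inspect_rig_count() -> None:
    rig_df = macro_usa_rig_count()
    print(rig_df.columns.tolist())                            # the value/change column pairs
    print(rig_df[["钻井总数_钻井数", "钻井总数_变化"]].tail())   # latest total rig count and its change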
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US Personal Spending MoM Report
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US Producer Price Index (PPI) Report
def macro_usa_ppi():
"""
US Producer Price Index (PPI) report, data from 20080226 to the present
https://datacenter.jin10.com/reportType/dc_usa_ppi
https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628
:return: US Producer Price Index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "37",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US Core Producer Price Index (PPI) Report
def macro_usa_core_ppi():
"""
US core Producer Price Index (PPI) report, data from 20080318 to the present
https://datacenter.jin10.com/reportType/dc_usa_core_ppi
https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709
:return: US core Producer Price Index (PPI) report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心生产者物价指数(PPI)报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "7",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_ppi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US API Crude Oil Inventory Report
def macro_usa_api_crude_stock():
"""
US API crude oil inventory report, data from 20120328 to the present
https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock
https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859
:return: US API crude oil inventory report - current value (10k barrels)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国API原油库存报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "69",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_api_crude_stock"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US Markit Manufacturing PMI (Preliminary) Report
def macro_usa_pmi():
"""
US Markit manufacturing PMI (preliminary) report, data from 20120601 to the present
https://datacenter.jin10.com/reportType/dc_usa_pmi
https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969
:return: US Markit manufacturing PMI (preliminary) report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "74",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Manufacturing - US ISM Manufacturing PMI Report
def macro_usa_ism_pmi():
"""
US ISM manufacturing PMI report, data from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_ism_pmi
https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071
:return: US ISM manufacturing PMI report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "28",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_pmi"
temp_df = temp_df.astype("float")
return temp_df
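# --- Illustrative usage sketch (added for clarity; not part of the original Jin10 client code) ---
# The Markit and ISM manufacturing PMI series above cover different periods (Markit from
# 2012, ISM from 1970). A hedged example of comparing them over their overlapping dates:
def _example_compare_manufacturing_pmi() -> pd.DataFrame:
    pmi_df = pd.concat([macro_usa_pmi(), macro_usa_ism_pmi()], axis=1, join="inner")
    pmi_df["spread"] = pmi_df["usa_pmi"] - pmi_df["usa_ism_pmi"]  # Markit minus ISM
    return pmi_df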
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industry - US Industrial Production MoM Report
def macro_usa_industrial_production():
"""
US industrial production MoM report, data from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_industrial_production
https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188
:return: US industrial production MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工业产出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "20",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_industrial_production"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industry - US Durable Goods Orders MoM Report
def macro_usa_durable_goods_orders():
"""
US durable goods orders MoM report, data from 20080227 to the present
https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders
https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295
:return: US durable goods orders MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国耐用品订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "13",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_durable_goods_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Industry - US Factory Orders MoM Report
def macro_usa_factory_orders():
"""
US factory orders MoM report, data from 19920401 to the present
https://datacenter.jin10.com/reportType/dc_usa_factory_orders
https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385
:return: US factory orders MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国工厂订单月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "16",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_factory_orders"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US Markit Services PMI (Preliminary) Report
def macro_usa_services_pmi():
"""
US Markit services PMI (preliminary) report, data from 20120701 to the present
https://datacenter.jin10.com/reportType/dc_usa_services_pmi
https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503
:return: US Markit services PMI (preliminary) report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国Markit服务业PMI初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "89",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_services_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US Business Inventories MoM Report
def macro_usa_business_inventories():
"""
US business inventories MoM report, data from 19920301 to the present
https://datacenter.jin10.com/reportType/dc_usa_business_inventories
https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618
:return: US business inventories MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国商业库存月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "4",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_business_inventories"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Services - US ISM Non-Manufacturing PMI Report
def macro_usa_ism_non_pmi():
"""
US ISM non-manufacturing PMI report, data from 19970801 to the present
https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi
https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693
:return: US ISM non-manufacturing PMI report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ISM非制造业PMI报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "29",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_ism_non_pmi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US NAHB Housing Market Index Report
def macro_usa_nahb_house_market_index():
"""
US NAHB housing market index report, data from 19850201 to the present
https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index
https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817
:return: US NAHB housing market index report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NAHB房产市场指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "31",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_nahb_house_market_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Annualized Housing Starts Report
def macro_usa_house_starts():
"""
US annualized housing starts report, data from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_house_starts
https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388
:return: US annualized housing starts report - current value (10k units)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋开工总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "17",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_starts"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Annualized New Home Sales Report
def macro_usa_new_home_sales():
"""
US annualized new home sales report, data from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_new_home_sales
https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501
:return: US annualized new home sales report - current value (10k units)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国新屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "32",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_new_home_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Building Permits Report
def macro_usa_building_permits():
"""
US building permits report, data from 20080220 to the present
https://datacenter.jin10.com/reportType/dc_usa_building_permits
https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v=1578747599
:return: US building permits report - current value (10k units)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国营建许可总数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "3",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_building_permits"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Annualized Existing Home Sales Report
def macro_usa_exist_home_sales():
"""
US annualized existing home sales report, data from 19700101 to the present
https://datacenter.jin10.com/reportType/dc_usa_exist_home_sales
https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v=1578747703
:return: US annualized existing home sales report - current value (10k units)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋销售总数年化报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万户)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "15",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_exist_home_sales"
temp_df = temp_df.astype("float")
return temp_df
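# --- Illustrative usage sketch (added for clarity; not part of the original Jin10 client code) ---
# The housing-activity helpers above (housing starts, new home sales, building permits,
# existing home sales) all return Series measured in 10k units, so they can be assembled
# into a single housing panel. The function name is hypothetical.
def _example_usa_housing_panel() -> pd.DataFrame:
    parts = [
        macro_usa_house_starts(),
        macro_usa_new_home_sales(),
        macro_usa_building_permits(),
        macro_usa_exist_home_sales(),
    ]
    return pd.concat(parts, axis=1).sort_index()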
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US FHFA House Price Index MoM Report
def macro_usa_house_price_index():
"""
US FHFA house price index MoM report, data from 19910301 to the present
https://datacenter.jin10.com/reportType/dc_usa_house_price_index
https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v=1578747781
:return: US FHFA house price index MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国FHFA房价指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "51",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_house_price_index"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US S&P/CS 20-City Home Price Index YoY Report
def macro_usa_spcs20():
"""
US S&P/CS 20-city home price index YoY report, data from 20010201 to the present
https://datacenter.jin10.com/reportType/dc_usa_spcs20
https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v=1578747873
:return: US S&P/CS 20-city home price index YoY report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国S&P/CS20座大城市房价指数年率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "52",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_spcs20"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Industry Indicators - Real Estate - US Pending Home Sales Index MoM Report
def macro_usa_pending_home_sales():
"""
US pending home sales index MoM report, data from 20010301 to the present
https://datacenter.jin10.com/reportType/dc_usa_pending_home_sales
https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v=1578747959
:return: US pending home sales index MoM report - current value (%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国成屋签约销售指数月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "34",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_pending_home_sales"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - US Conference Board Consumer Confidence Index Report
def macro_usa_cb_consumer_confidence():
"""
US Conference Board consumer confidence index report, data from 19700101 to the present
https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v=1578576859
:return: US Conference Board consumer confidence index report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国谘商会消费者信心指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "5",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cb_consumer_confidence"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - US NFIB Small Business Confidence Index Report
def macro_usa_nfib_small_business():
"""
US NFIB small business confidence index report, data from 19750201 to the present
https://cdn.jin10.com/dc/reports/dc_usa_nfib_small_business_all.js?v=1578576631
:return: US NFIB small business confidence index report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_nfib_small_business_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国NFIB小型企业信心指数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "63",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "nfib_small_business"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Leading Indicators - US University of Michigan Consumer Sentiment Index (Preliminary) Report
def macro_usa_michigan_consumer_sentiment():
"""
US University of Michigan consumer sentiment index (preliminary) report, data from 19700301 to the present
https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment
:return: US University of Michigan consumer sentiment index (preliminary) report - current value
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_michigan_consumer_sentiment_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}")
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国密歇根大学消费者信心指数初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "50",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "michigan_consumer_sentiment"
temp_df = temp_df.astype(float)
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Other - US EIA Crude Oil Inventory Report
def macro_usa_eia_crude_rate():
"""
US EIA crude oil inventory report, data from 19950801 to the present
https://datacenter.jin10.com/reportType/dc_eia_crude_oil
:return: pandas.Series
1982-09-01 -262.6
1982-10-01 -8
1982-11-01 -41.3
1982-12-01 -87.6
1983-01-01 51.3
...
2019-10-02 310
2019-10-09 292.7
2019-10-16 0
2019-10-17 928.1
2019-10-23 0
"""
t = time.time()
res = requests.get(
JS_USA_EIA_CRUDE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国EIA原油库存(万桶)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万桶)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "10",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "eia_crude_rate"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - Economic Indicators - USA - Other - US Initial Jobless Claims Report
def macro_usa_initial_jobless():
"""
US initial jobless claims report, data from 19700101 to the present
:return: pandas.Series
1970-01-01 22.1087
1970-02-01 24.9318
1970-03-01 25.85
1970-04-01 26.8682
1970-05-01 33.1591
...
2019-09-26 21.5
2019-10-03 22
2019-10-10 21
2019-10-17 21.4
2019-10-24 0
"""
t = time.time()
res = requests.get(
JS_USA_INITIAL_JOBLESS_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国初请失业金人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "44",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "initial_jobless"
temp_df = temp_df.astype("float")
return temp_df
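# --- Illustrative usage sketch (added for clarity; not part of the original Jin10 client code) ---
# As the sample output in the docstring above shows, the initial jobless claims series
# mixes monthly observations (early history) with weekly ones (recent years). One way to
# put it on a uniform monthly grid is to resample and average; the names are examples only.
def _example_monthly_jobless_claims() -> pd.Series:
    claims = macro_usa_initial_jobless()            # unit: 10k persons
    return claims.resample("M").mean().dropna()     # monthly mean of the available readings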
# Jin10 Data Center - Economic Indicators - USA - Other - US Crude Oil Production Report
def macro_usa_crude_inner():
"""
US crude oil production report, data from 19830107 to the present
https://datacenter.jin10.com/reportType/dc_eia_crude_oil_produce
:return: pandas.DataFrame (the sample below shows the total US production column)
1983-01-07 863.40
1983-01-14 863.40
1983-01-21 863.40
1983-01-28 863.40
1983-02-04 866.00
...
2019-09-20 1250.00
2019-09-27 1240.00
2019-10-04 1260.00
2019-10-11 1260.00
2019-10-18 1260.00
"""
t = time.time()
params = {
"_": t
}
res = requests.get("https://cdn.jin10.com/data_center/reports/usa_oil.json", params=params)
temp_df = pd.DataFrame(res.json().get("values")).T
big_df = pd.DataFrame()
big_df["美国国内原油总量_产量"] = temp_df["美国国内原油总量"].apply(lambda x: x[0])
big_df["美国国内原油总量_变化"] = temp_df["美国国内原油总量"].apply(lambda x: x[1])
big_df["美国本土48州原油产量_产量"] = temp_df["美国本土48州原油产量"].apply(lambda x: x[0])
big_df["美国本土48州原油产量_变化"] = temp_df["美国本土48州原油产量"].apply(lambda x: x[1])
big_df["美国阿拉斯加州原油产量_产量"] = temp_df["美国阿拉斯加州原油产量"].apply(lambda x: x[0])
big_df["美国阿拉斯加州原油产量_变化"] = temp_df["美国阿拉斯加州原油产量"].apply(lambda x: x[1])
big_df = big_df.astype("float")
return big_df
# Jin10 Data Center - US Commodity Futures Trading Commission (CFTC) Non-Commercial Currency Futures Positions Report
def macro_usa_cftc_nc_holding():
"""
US CFTC non-commercial currency futures positions report, data from 19830107 to the present
https://datacenter.jin10.com/reportType/dc_cftc_nc_report
https://cdn.jin10.com/data_center/reports/cftc_4.json?_=1591535493741
:return: pandas.DataFrame
"""
t = time.time()
params = {
"_": str(int(round(t * 1000)))
}
r = requests.get("https://cdn.jin10.com/data_center/reports/cftc_4.json", params=params)
json_data = r.json()
temp_df = pd.DataFrame(json_data["values"]).T
temp_df.fillna("[0, 0, 0]", inplace=True)
big_df = pd.DataFrame()
for item in temp_df.columns:
for i in range(3):
inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])
inner_temp_df.name = inner_temp_df.name + "-" + json_data["keys"][i]["name"]
big_df = pd.concat([big_df, inner_temp_df], axis=1)
big_df.sort_index(inplace=True)
return big_df
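# --- Illustrative note and sketch (added for clarity; not part of the original Jin10 client code) ---
# The loop above expands each cell, a string such as "[0, 0, 0]", into three columns by
# calling eval(). When adapting this pattern, ast.literal_eval is a safer way to parse such
# list literals because it never executes arbitrary expressions; a hypothetical helper:
import ast

def _example_parse_position_cell(cell) -> list:
    # e.g. "[120, 45, 75]" -> [120, 45, 75]
    return ast.literal_eval(str(cell))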
# Jin10 Data Center - US Commodity Futures Trading Commission (CFTC) Non-Commercial Commodity Futures Positions Report
def macro_usa_cftc_c_holding():
"""
US CFTC non-commercial commodity futures positions report, data from 19830107 to the present
https://datacenter.jin10.com/reportType/dc_cftc_c_report
https://cdn.jin10.com/data_center/reports/cftc_2.json?_=1591536282271
:return: pandas.DataFrame
"""
t = time.time()
params = {
"_": str(int(round(t * 1000)))
}
r = requests.get("https://cdn.jin10.com/data_center/reports/cftc_2.json", params=params)
json_data = r.json()
temp_df = pd.DataFrame(json_data["values"]).T
temp_df.fillna("[0, 0, 0]", inplace=True)
big_df = pd.DataFrame()
for item in temp_df.columns:
for i in range(3):
inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])
inner_temp_df.name = inner_temp_df.name + "-" + json_data["keys"][i]["name"]
big_df = pd.concat([big_df, inner_temp_df], axis=1)
big_df.sort_index(inplace=True)
return big_df
# 金十数据中心-美国商品期货交易委员会CFTC外汇类商业持仓报告
def macro_usa_cftc_merchant_currency_holding():
"""
    U.S. CFTC commercial positions report for FX futures; data available from 1986-01-15 to the present
https://datacenter.jin10.com/reportType/dc_cftc_merchant_currency
https://cdn.jin10.com/data_center/reports/cftc_3.json?_=1591536389283
:return: pandas.DataFrame
"""
t = time.time()
params = {
"_": str(int(round(t * 1000)))
}
r = requests.get("https://cdn.jin10.com/data_center/reports/cftc_3.json", params=params)
json_data = r.json()
    temp_df = pd.DataFrame(json_data["values"])  # api: pandas.DataFrame
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import csv
import logging
import os
import shutil
from csv import DictWriter
from typing import (
Any, Dict, FrozenSet,
)
from pyhocon import ConfigFactory, ConfigTree
from databuilder.job.base_job import Job
from databuilder.loader.base_loader import Loader
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.serializers import neo4_serializer
from databuilder.utils.closer import Closer
import boto3
import botocore
import pandas as pd
s3 = boto3.resource('s3')
LOGGER = logging.getLogger(__name__)
class S3Neo4jCSVLoader(Loader):
"""
Write node and relationship CSV file(s) that can be consumed by
Neo4jCsvPublisher.
It assumes that the record it consumes is instance of Neo4jCsvSerializable
"""
# Config keys
FORCE_CREATE_DIR = 'force_create_directory'
SHOULD_DELETE_CREATED_DIR = 'delete_created_directories'
# s3 key
# s3 Bucket
NODE_S3_BUCKET = 'node_s3_bucket'
# Node s3 Prefix
NODE_S3_PREFIX = 'node_s3_prefix'
# Relations s3 Prefix
RELATION_S3_PREFIX = 'relations_s3_prefix'
_DEFAULT_CONFIG = ConfigFactory.from_dict({
SHOULD_DELETE_CREATED_DIR: True,
FORCE_CREATE_DIR: False
})
def __init__(self) -> None:
self._node_file_mapping: Dict[Any, DictWriter] = {}
self._relation_file_mapping: Dict[Any, DictWriter] = {}
self._keys: Dict[FrozenSet[str], int] = {}
self._closer = Closer()
def init(self, conf: ConfigTree) -> None:
"""
Initializing S3Neo4jCsvLoader by creating directory for node files
and relationship files. Note that the directory defined in
configuration should not exist.
:param conf:
:return:
"""
conf = conf.with_fallback(S3Neo4jCSVLoader._DEFAULT_CONFIG)
self._s3_bucket = conf.get_string(S3Neo4jCSVLoader.NODE_S3_BUCKET)
        self._node_s3_prefix = conf.get_string(S3Neo4jCSVLoader.NODE_S3_PREFIX)
self._relation_s3_prefix = \
conf.get_string(S3Neo4jCSVLoader.RELATION_S3_PREFIX)
    def _s3_obj_exists(self, node_dict, key, _node_dir, file_suffix, bucket_info):
        try:
            s3.Object(bucket_info, _node_dir + file_suffix + '.csv').load()
            return True
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                return False

    def _get_s3_object(self, node_dict, key, _s3_bucket_info, _node_s3_prefix, file_suffix):
        if (self._s3_obj_exists(node_dict, key, _node_s3_prefix, file_suffix, _s3_bucket_info) == True):
            df = pd.read_csv('s3://' + _s3_bucket_info + '/' + _node_s3_prefix + file_suffix + '.csv')  # api: pandas.read_csv
# BUG: Cannot calculate quantiles from Int64Dtype Series when results are floats #42626
import pandas as pd
print(pd.__version__)
result = pd.Series([1, 2, 3], dtype="Int64")  # api: pandas.Series
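# Hedged illustration of the reported issue (the exact failing call is an
# assumption based on the bug title): quantiles whose results are floats.
print(result.quantile([0.25, 0.75]))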
import os.path
my_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
filepath = os.path.join(my_path, 'documents/Leadss.csv')
fpath = os.path.join(my_path, 'static/images/outliers')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import table  # pandas.tools.plotting was renamed to pandas.plotting
def outliers(filepath):
newDF = pd.DataFrame()
    df = pd.read_csv(filepath)  # api: pandas.read_csv
"""
Game scraping functions - ties together the other scraping modules.
"""
import logging
import pandas as pd
from pandas import DataFrame
from hockeydata.constants import PBP_COLUMNS_ENHANCED
from hockeydata.scrape.json_schedule import get_date, get_schedule_game_ids
from hockeydata.scrape.players import get_players
from hockeydata.scrape import json_shifts, json_pbp, html_pbp, json_boxscore
logger = logging.getLogger('LOG.scrape')
def get_games(start: str, end: str) -> DataFrame:
"""
Get the game ids for games that occured in the given time range (inclusive)
:param start: YYYY-MM-DD
:param end: YYYY-MM-DD
:return: Dataframe of game ids
"""
game_ids = get_schedule_game_ids(start, end, False)
return DataFrame(columns=['GAME_ID'], data=game_ids)
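# Hedged usage sketch (added for illustration; the dates are arbitrary examples
# and scraping requires network access to the NHL schedule endpoints):
def _example_get_games() -> DataFrame:
    # One-column DataFrame of game ids for an inclusive one-week range.
    return get_games('2019-10-01', '2019-10-07')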
def get_game_summaries(game_ids: list) -> DataFrame:
summaries = []
for game_id in game_ids:
current_summary = get_game_summary(game_id)
if current_summary is not None:
summaries.append(current_summary)
if len(summaries) > 0:
return pd.concat(summaries)
else:
return None
def get_game_summary(game_id: str) -> DataFrame:
game_summary = json_boxscore.scrape_game(game_id)
return game_summary
def get_season_pbp(season: int) -> DataFrame:
logger.info("Scraping Season: {}".format(season))
from_date = '-'.join([str(season), '9', '1'])
to_date = '-'.join([str(season + 1), '7', '1'])
game_ids = get_schedule_game_ids(from_date, to_date, False)
return get_games_pbp(game_ids)
def get_seasons_pbp(seasons: list) -> DataFrame:
"""
    :param seasons: list of season starting years (e.g. 2018 for the 2018-19 season)
    :return: DataFrame of play-by-play data for the given seasons, or None if nothing was scraped
"""
logger.info("Scraping PBP of List of Games of Size: {}".format(len(seasons)))
pbps = []
for season in seasons:
current_pbp = get_season_pbp(season)
if current_pbp is not None:
pbps.append(current_pbp)
if len(pbps) > 0:
return pd.concat(pbps)
else:
return None
def get_game_pbp(game_id: str) -> DataFrame:
"""
Gets the pbp for a game, merges data sources as required.
:param game_id:
:return:
"""
logger.info("Scraping Game: {}".format(game_id))
pbp = game_html_pbp(game_id)
pbp = add_event_coordinates(pbp, game_id)
return pbp
def get_games_pbp(game_ids: list) -> DataFrame:
"""
Gets the pbp for a list of games. This function is just in charge of merging the output
from get_game_pbp()
:param game_ids:
:return:
"""
logger.info("Scraping PBP of List of Games of Size: {}".format(len(game_ids)))
pbps = []
for game_id in game_ids:
current_pbp = get_game_pbp(game_id)
if current_pbp is not None:
pbps.append(current_pbp)
if len(pbps) > 0:
        return pd.concat(pbps)  # api: pandas.concat
import pandas as pd
def _reversion(bfq_data, xdxr_data, type_):
"""使用数据库数据进行复权"""
info = xdxr_data.query('category==1')
bfq_data = bfq_data.assign(if_trade=1)
if len(info) > 0:
        # ex-dividend/ex-rights records exist
data = pd.concat([bfq_data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['category']]], axis=1)
data['if_trade'].fillna(value=0, inplace=True)
data = data.fillna(method='ffill')
        data = pd.concat([data, info.loc[bfq_data.index[0]:bfq_data.index[-1], ['fenhong', 'peigu', 'peigujia', 'songzhuangu']]], axis=1)  # api: pandas.concat
import os
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple
import datetime
import utils
import thesismain
MESSAGES_GENERATED = 'generated'
MESSAGES_PROCESSED = 'processed'
class Plot:
def __init__(self, config_name, network_name, simulation_time, verbose_logs, run_simulation, plot_all):
self.plot_counts = {}
self.time_string = time.strftime('%Y-%m-%dT%H.%M.%S')
self.config_name = config_name
self.network_name = network_name
self.simulation_time = simulation_time
self.path = os.path.join('out', '%s_%s' % (self.time_string, self.config_name))
self.verbose_logs = verbose_logs
self.plot_all = plot_all
if run_simulation:
utils.run_simulation(config_name)
utils.export_to_csv(config_name)
if plot_all:
if not os.path.exists(self.path):
os.mkdir(self.path)
# utils.save_simulation_state(self.path)
self.csv = utils.parse_omnetpp_csv(config_name)
self.run = self.csv.run.str.startswith(config_name)
self.modules = self.csv.module.str.startswith(network_name, na=False)
self.all_messages, self.all_nodes = self.prepare_all_messages()
self.all_nodes_info = {}
# get node information
nodes_info = self.create_message_csv()
for node in self.all_nodes:
node_row = network_name+'.'+node
self.all_nodes_info[node] = (nodes_info['processingType'][node_row], nodes_info['processingScale'][node_row])
def __del__(self):
if os.path.exists(self.path):
print('Plotted to %s\n' % self.path)
def save_to_file(self, group, name):
if not os.path.exists(self.path):
os.mkdir(self.path)
if group in self.plot_counts:
self.plot_counts[group] += 1
else:
self.plot_counts[group] = 0
plt.savefig('%s/%s_%d_%s' % (self.path, group, self.plot_counts[group], name))
plt.clf()
def prepare_all_messages(self):
generated = pd.DataFrame(columns=['msgID', 'time'])
# read and combine all generated messages
for f in utils.glob_csv_files(self.config_name, MESSAGES_GENERATED):
c = pd.read_csv(f)
            generated = pd.concat([generated, c])  # DataFrame.append was removed in pandas 2.x
generated["time"] = pd.to_numeric(generated["time"])
# convert to seconds
generated['time'] = generated['time'] / 1000
generated.sort_values(by=['time', 'msgID'], inplace=True)
# make sure we don't lose lines
generated_count = generated.msgID.shape
# get max diff and set as bin size
for f in utils.glob_csv_files(self.config_name, MESSAGES_PROCESSED):
node_id = int(f.split('_')[1][:-4])
node = 'node[%s]' % node_id
node_suffix = '_%s' % node
node_time_column = 'time%s' % node_suffix
c = pd.read_csv(f)
generated = generated.merge(c, on='msgID', how='left', suffixes=[None, node_suffix])
generated.rename(columns={node_time_column: node}, inplace=True)
            generated[node] = pd.to_numeric(generated[node])  # api: pandas.to_numeric
"""This module imports other modules to train the vgg16 model."""
from __future__ import print_function
from crop_resize_transform import model_data
from test import test
import matplotlib.pyplot as plt
import random
from scipy.io import loadmat
import numpy as np
import pandas as pd
import cv2 as cv
import glob
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Flatten, Dense, Dropout
from keras import backend as K
from keras import applications
K.clear_session()
# set seed for reproducibility
seed_val = 9000
np.random.seed(seed_val)
random.seed(seed_val)
# load the examples file
examples = loadmat('FLIC-full/examples.mat')
# reshape the examples array
examples = examples['examples'].reshape(-1,)
# each coordinate corresponds to the below listed body joints/locations
# in the same order
joint_labels = ['lsho', 'lelb', 'lwri', 'rsho', 'relb', 'rwri', 'lhip',
'lkne', 'lank', 'rhip', 'rkne', 'rank', 'leye', 'reye',
'lear', 'rear', 'nose', 'msho', 'mhip', 'mear', 'mtorso',
'mluarm', 'mruarm', 'mllarm', 'mrlarm', 'mluleg', 'mruleg',
'mllleg', 'mrlleg']
# list of known joints
known_joints = [x for i, x in enumerate(joint_labels) if i in np.r_[0:7, 9,
12:14, 16]]
target_joints = ['lsho', 'lelb', 'lwri', 'rsho', 'relb',
'rwri', 'leye', 'reye', 'nose']
# indices of the needed joints in the coordinates array
joints_loc_id = np.r_[0:6, 12:14, 16]
def joint_coordinates(joint):
"""Store necessary coordinates to a list."""
joint_coor = []
# Take mean of the leye, reye, nose to obtain coordinates for the head
joint['head'] = (joint['leye']+joint['reye']+joint['nose'])/3
joint_coor.extend(joint['lwri'].tolist())
joint_coor.extend(joint['lelb'].tolist())
joint_coor.extend(joint['lsho'].tolist())
joint_coor.extend(joint['head'].tolist())
joint_coor.extend(joint['rsho'].tolist())
joint_coor.extend(joint['relb'].tolist())
joint_coor.extend(joint['rwri'].tolist())
return joint_coor
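# Hedged sanity-check sketch (illustrative dummy joints, not real FLIC data):
# joint_coordinates flattens seven (x, y) pairs, so it should return 14 values.
def _example_joint_coordinates() -> None:
    dummy = {name: np.array([0.0, 0.0]) for name in target_joints}
    assert len(joint_coordinates(dummy)) == 14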
# load the indices matlab file
train_indices = loadmat('FLIC-full/tr_plus_indices.mat')
# reshape the training_indices array
train_indices = train_indices['tr_plus_indices'].reshape(-1,)
# empty list to store train image ids
train_ids = []
# empty list to store train joints
train_jts = []
# empty list to store test image ids
test_ids = []
# empty list to store test joints
test_jts = []
for i, example in enumerate(examples):
# image id
file_name = example[3][0]
# joint coordinates
joint = example[2].T
# dictionary that goes into the joint_coordinates function
joints = dict(zip(target_joints,
[x for k, x in enumerate(joint) if k in joints_loc_id]))
# obtain joints for the task
joints = joint_coordinates(joints)
# use train indices list to decide if an image is to be used for training
# or testing
if i in train_indices:
train_ids.append(file_name)
train_jts.append(joints)
else:
test_ids.append(file_name)
test_jts.append(joints)
# Concatenate image ids dataframe and the joints dataframe and save it as a csv
train_df = pd.concat([pd.DataFrame(train_ids), pd.DataFrame(train_jts)],
axis=1)
test_df = pd.concat([pd.DataFrame(test_ids), pd.DataFrame(test_jts)], axis=1)
train_df.to_csv('FLIC-full/train_joints.csv', index=False, header=False)
test_df.to_csv('FLIC-full/test_joints.csv', index=False, header=False)
# load train_joints.csv
train_data = pd.read_csv('FLIC-full/train_joints.csv', header=None)
# load test_joints.csv
test_data = pd.read_csv('FLIC-full/test_joints.csv', header=None)  # api: pandas.read_csv
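# Hedged illustration (assumes the CSV layout written above: one image id column
# followed by 14 joint values, i.e. 7 joints x (x, y) coordinates):
def _example_reshape_joints(joints_df: pd.DataFrame) -> np.ndarray:
    # Drop the image id column and regroup the remaining values into (x, y) pairs.
    return joints_df.iloc[:, 1:].to_numpy(dtype=float).reshape(-1, 7, 2)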
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000; pandas.datetime was removed in newer pandas
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():  # iterate (column name, Series) pairs
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
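    # Hedged check sketch (added for illustration, not part of the original
    # tool): on a small clean series the helpers above should roughly agree
    # with numpy's ddof=1 standard deviation.
    def _exampleStatsAgreement(self):
        sample = pandas.Series([1.0, 2.0, 3.0, 4.0])
        return (self._calculateMean(sample),
                self._calculateStd(sample),
                numpy.std(sample.to_numpy(), ddof=1))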
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(sigmaValue):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(sigmaRangeValue):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": | pandas.StringDtype() | pandas.StringDtype |
from __future__ import annotations
import numpy as np
from numpy.linalg import lstsq
from numpy.random import RandomState, standard_normal
from numpy.testing import assert_allclose
from pandas import Categorical, DataFrame, date_range, get_dummies
from pandas.testing import assert_frame_equal, assert_series_equal
from linearmodels.panel.data import PanelData
from linearmodels.shared.utility import AttrDict, panel_to_frame
from linearmodels.typing import Literal
try:
import xarray # noqa: F401
MISSING_XARRAY = False
except ImportError:
MISSING_XARRAY = True
datatypes = ["numpy", "pandas"]
if not MISSING_XARRAY:
datatypes += ["xarray"]
def lsdv(
y: DataFrame, x: DataFrame, has_const=False, entity=False, time=False, general=None
):
nvar = x.shape[1]
temp = x.reset_index()
cat_index = temp.index
if entity:
cat = Categorical(temp.iloc[:, 0])
cat.index = cat_index
dummies = get_dummies(cat, drop_first=has_const)
x = DataFrame(
np.c_[x.values, dummies.values.astype(np.float64)],
index=x.index,
columns=list(x.columns) + list(dummies.columns),
)
if time:
cat = Categorical(temp.iloc[:, 1])
cat.index = cat_index
dummies = get_dummies(cat, drop_first=(has_const or entity))
x = DataFrame(
np.c_[x.values, dummies.values.astype(np.float64)],
index=x.index,
columns=list(x.columns) + list(dummies.columns),
)
if general is not None:
cat = Categorical(general)
cat.index = cat_index
dummies = get_dummies(cat, drop_first=(has_const or entity or time))
x = DataFrame(
np.c_[x.values, dummies.values.astype(np.float64)],
index=x.index,
columns=list(x.columns) + list(dummies.columns),
)
w = np.ones_like(y)
wy = w * y.values
wx = w * x.values
params = lstsq(wx, wy, rcond=None)[0]
params = params.squeeze()
return params[:nvar]
def generate_data(
missing: bool,
datatype: Literal["pandas", "xarray", "numpy"],
const: bool = False,
ntk: tuple[int, int, int] = (971, 7, 5),
other_effects: int = 0,
rng: RandomState | None = None,
num_cats: int | list[int] = 4,
):
if rng is None:
np.random.seed(12345)
else:
np.random.set_state(rng.get_state())
from linearmodels.typing import Float64Array
n, t, k = ntk
k += const
x = standard_normal((k, t, n))
beta = np.arange(1, k + 1)[:, None, None] / k
y: Float64Array = np.empty((t, n), dtype=np.float64)
y[:, :] = (x * beta).sum(0) + standard_normal((t, n)) + 2 * standard_normal((1, n))
w = np.random.chisquare(5, (t, n)) / 5
c = np.empty((y.size, 0), dtype=int)
if other_effects == 1:
cats = ["Industries"]
else:
cats = ["cat." + str(i) for i in range(other_effects)]
if other_effects:
if isinstance(num_cats, int):
num_cats = [num_cats] * other_effects
oe = []
for i in range(other_effects):
nc = num_cats[i]
oe.append(np.random.randint(0, nc, (1, t, n)))
c = np.concatenate(oe, 0)
vcats = ["varcat." + str(i) for i in range(2)]
vc2 = np.ones((2, t, 1)) @ np.random.randint(0, n // 2, (2, 1, n))
vc1 = vc2[[0]]
if const:
x[0] = 1.0
if missing > 0:
locs = np.random.choice(n * t, int(n * t * missing))
y.flat[locs] = float(np.nan)
locs = np.random.choice(n * t * k, int(n * t * k * missing))
x.flat[locs] = float(np.nan)
if rng is not None:
rng.set_state(np.random.get_state())
if datatype == "numpy":
return AttrDict(y=y, x=x, w=w, c=c, vc1=vc1, vc2=vc2)
entities = ["firm" + str(i) for i in range(n)]
time = date_range("1-1-1900", periods=t, freq="A-DEC")
var_names = ["x" + str(i) for i in range(k)]
# y = DataFrame(y, index=time, columns=entities)
y_df = panel_to_frame(
y[None], items=["y"], major_axis=time, minor_axis=entities, swap=True
)
w_df = panel_to_frame(
w[None], items=["w"], major_axis=time, minor_axis=entities, swap=True
)
w_df = w_df.reindex(y_df.index)
x_df = panel_to_frame(
x, items=var_names, major_axis=time, minor_axis=entities, swap=True
)
x_df = x_df.reindex(y_df.index)
if c.shape[1]:
c_df = panel_to_frame(
c, items=cats, major_axis=time, minor_axis=entities, swap=True
)
else:
c_df = DataFrame(index=y_df.index)
c_df = c_df.reindex(y_df.index)
vc1_df = panel_to_frame(
vc1, items=vcats[:1], major_axis=time, minor_axis=entities, swap=True
)
vc1_df = vc1_df.reindex(y_df.index)
vc2_df = panel_to_frame(
vc2, items=vcats, major_axis=time, minor_axis=entities, swap=True
)
vc2_df = vc2_df.reindex(y_df.index)
if datatype == "pandas":
return AttrDict(y=y_df, x=x_df, w=w_df, c=c_df, vc1=vc1_df, vc2=vc2_df)
assert datatype == "xarray"
import xarray as xr
from xarray.core.dtypes import NA
x_xr = xr.DataArray(
PanelData(x_df).values3d,
coords={"entities": entities, "time": time, "vars": var_names},
dims=["vars", "time", "entities"],
)
y_xr = xr.DataArray(
PanelData(y_df).values3d,
coords={"entities": entities, "time": time, "vars": ["y"]},
dims=["vars", "time", "entities"],
)
w_xr = xr.DataArray(
PanelData(w_df).values3d,
coords={"entities": entities, "time": time, "vars": ["w"]},
dims=["vars", "time", "entities"],
)
c_vals = PanelData(c_df).values3d if c.shape[1] else NA
c_xr = xr.DataArray(
c_vals,
coords={"entities": entities, "time": time, "vars": c_df.columns},
dims=["vars", "time", "entities"],
)
vc1_xr = xr.DataArray(
PanelData(vc1_df).values3d,
coords={"entities": entities, "time": time, "vars": vc1_df.columns},
dims=["vars", "time", "entities"],
)
vc2_xr = xr.DataArray(
PanelData(vc2_df).values3d,
coords={"entities": entities, "time": time, "vars": vc2_df.columns},
dims=["vars", "time", "entities"],
)
return AttrDict(y=y_xr, x=x_xr, w=w_xr, c=c_xr, vc1=vc1_xr, vc2=vc2_xr)
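# Hedged illustration (small panel, added for this write-up; not used by the
# test suite): generate_data returns aligned y/x frames for the pandas datatype.
def _example_generate_data() -> None:
    data = generate_data(missing=False, datatype="pandas", const=True, ntk=(50, 5, 3))
    assert data.y.shape[0] == data.x.shape[0]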
def assert_results_equal(res1, res2, test_fit=True, test_df=True, strict=True):
n = min(res1.params.shape[0], res2.params.shape[0])
assert_series_equal(res1.params.iloc[:n], res2.params.iloc[:n])
    assert_series_equal(res1.pvalues.iloc[:n], res2.pvalues.iloc[:n])  # api: pandas.testing.assert_series_equal
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
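    # Hedged illustration (added for this write-up, not an original test case):
    # the helpers above build a ten-interval index; the NaN variant keeps the
    # same length but reports missing values.
    def _example_helper_shapes(self):
        assert len(self.create_index()) == 10
        assert self.create_index_with_nan().hasnans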
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = | IntervalIndex.from_intervals(index.values, copy=False) | pandas.IntervalIndex.from_intervals |
import skimage.feature
import skimage.transform
import skimage.filters
import scipy.interpolate
import scipy.ndimage
import scipy.spatial
import scipy.optimize
import numpy as np
import pandas
import plot
class ParticleFinder:
def __init__(self, image):
"""
Class for finding circular particles
:param image:
"""
self.image = image
self.n = 100
self.size_range = (5, 30)
self.mean = np.mean(self.image)
self.min = np.min(self.image)
self.max = np.max(self.image)
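    # Example usage (a sketch, not from the original source; the file name and
    # parameter values are assumptions for illustration only):
    #
    #   import skimage.io
    #   image = skimage.io.imread('particles.png', as_gray=True)
    #   finder = ParticleFinder(image)
    #   fits = finder.locate_particles(n=50, size_range=(5, 30))
    #   print(fits[['x', 'y', 'r', 'dev']].head())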
def locate_particles(self, n=100, size_range=(5, 30)):
"""
Find circular particles in the image
        :param int n: maximum number of circles to keep
        :param tuple size_range: (min, max) radius, in pixels, of circles to look for
        :rtype: pandas.DataFrame
        :return: one row per fitted circle with columns ['r', 'y', 'x', 'dev']
"""
self.n = int(np.round(n))
self.size_range = size_range
# 1. Detect blobs in image
blobs = self.locate_circles()
if blobs.empty:
return pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
# 2. Find circles
fit = pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
for i, blob in blobs.iterrows():
fit = pandas.concat([fit, self.find_circle(blob)], ignore_index=True)
return fit
def locate_circles(self):
"""
Locate blobs in the image by using a Laplacian of Gaussian method
:rtype : pandas.DataFrame
:return:
"""
radii = np.linspace(self.size_range[0], self.size_range[1],
                            num=int(min(abs(self.size_range[0] - self.size_range[1]) * 2.0, 30)), dtype=float)
# Find edges
edges = skimage.feature.canny(self.image)
circles = skimage.transform.hough_circle(edges, radii)
fit = pandas.DataFrame(columns=['r', 'y', 'x', 'accum'])
for radius, h in zip(radii, circles):
peaks = skimage.feature.peak_local_max(h, threshold_rel=0.5, num_peaks=self.n)
accumulator = h[peaks[:, 0], peaks[:, 1]]
fit = pandas.concat(
[fit, pandas.DataFrame(data={'r': [radius] * peaks.shape[0], 'y': peaks[:, 0], 'x': peaks[:, 1],
'accum': accumulator})], ignore_index=True)
fit = self.merge_hough_same_values(fit)
return fit
@staticmethod
def flatten_multi_columns(col):
"""
        Flatten a (possibly multi-level) column label to its first element.
        :param col: column label, either a plain label or a tuple from a MultiIndex
        :return: the flattened column label
"""
        if isinstance(col, tuple):
            return col[0]
        return col
def merge_hough_same_values(self, data):
"""
        Merge Hough detections that belong to the same circle, keeping the brightest candidates.
        :param data: DataFrame with columns ['r', 'y', 'x', 'accum']
        :return: deduplicated DataFrame limited to the n highest-accumulator circles
"""
while True:
# Rescale positions, so that pairs are identified below a distance
# of 1. Do so every iteration (room for improvement?)
positions = data[['x', 'y']].values
mass = data['accum'].values
duplicates = scipy.spatial.cKDTree(positions, 30).query_pairs(np.mean(data['r']), p=2.0, eps=0.1)
if len(duplicates) == 0:
break
to_drop = []
for pair in duplicates:
# Drop the dimmer one.
if np.equal(*mass.take(pair, 0)):
# Rare corner case: a tie!
# Break ties by sorting by sum of coordinates, to avoid
# any randomness resulting from cKDTree returning a set.
dimmer = np.argsort(np.sum(positions.take(pair, 0), 1))[0]
else:
dimmer = np.argmin(mass.take(pair, 0))
to_drop.append(pair[dimmer])
data.drop(to_drop, inplace=True)
# Keep only brightest n circles
data = data.sort_values(by=['accum'], ascending=False)
data = data.head(self.n)
return data
def find_circle(self, blob):
"""
Find a circle based on the blob
:rtype : pandas.DataFrame
:param blob:
:return:
"""
# Get intensity in spline representation
rad_range = (-blob.r, blob.r)
intensity, (x, y, step_x, step_y) = self.get_intensity_interpolation(blob, rad_range)
if not self.check_intensity_interpolation(intensity):
return pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
# Find the coordinates of the edge
edge_coords = self.find_edge(intensity)
if np.isnan(edge_coords.x).any():
return pandas.DataFrame(columns=['r', 'y', 'x', 'dev'])
# Set outliers to mean of rest of x coords
edge_coords = self.remove_outliers(edge_coords)
# Convert to cartesian
coords = self.spline_coords_to_cartesian(edge_coords, rad_range, x, y, step_x, step_y)
# Fit the circle
fit = self.fit_circle(coords)
return fit
def get_intensity_interpolation(self, blob, rad_range):
"""
Create a spline representation of the intensity
        :param blob: detected blob (row with attributes r, x and y)
        :param rad_range: (inner, outer) offsets, relative to the rim, over which to sample
        :return: (intensity, (x, y, step_x, step_y)), the intensity sampled along rays normal to the circle
"""
n = int(np.round(2 * np.pi * np.sqrt(blob.r ** 2)))
spline_order = 3
t = np.linspace(-np.pi, np.pi, n, endpoint=False)
normal_angle = np.arctan2(blob.r * np.sin(t), blob.r * np.cos(t))
x = blob.r * np.cos(t) + blob.x
y = blob.r * np.sin(t) + blob.y
step_x = np.cos(normal_angle)
step_y = np.sin(normal_angle)
steps = np.arange(rad_range[0], rad_range[1] + 1, 1)[np.newaxis, :]
x_rad = x[:, np.newaxis] + steps * step_x[:, np.newaxis]
y_rad = y[:, np.newaxis] + steps * step_y[:, np.newaxis]
# create a spline representation of the colloid region
bound_y = slice(max(round(blob.y - blob.r + rad_range[0]), 0),
min(round(blob.y + blob.r + rad_range[1] + 1), self.image.shape[0]))
bound_x = slice(max(round(blob.x - blob.r + rad_range[0]), 0),
min(round(blob.x + blob.r + rad_range[1] + 1), self.image.shape[1]))
interpolation = scipy.interpolate.RectBivariateSpline(np.arange(bound_y.start, bound_y.stop),
np.arange(bound_x.start, bound_x.stop),
self.image[bound_y, bound_x], kx=spline_order,
ky=spline_order, s=0)
intensity = interpolation(y_rad, x_rad, grid=False)
# check for points outside the image; set these to mean
mask = ((y_rad >= bound_y.stop) | (y_rad < bound_y.start) |
(x_rad >= bound_x.stop) | (x_rad < bound_x.start))
intensity[mask] = self.mean
return intensity, (x, y, step_x, step_y)
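    # Sampling geometry (illustrative note, not from the original source): for a
    # blob with r=10 at (x=50, y=40) and rad_range=(-10, 10), roughly 63 rays are
    # cast normal to the rim, each sampled from 10 px inside to 10 px outside it,
    # so `intensity` has shape (63, 21) with the particle edge expected near the
    # middle column of each row.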
@staticmethod
def create_binary_mask(intensity):
# Create binary mask
thresh = skimage.filters.threshold_otsu(intensity)
mask = intensity > thresh
# Fill holes in binary mask
        mask = scipy.ndimage.binary_fill_holes(mask)
return mask
@classmethod
def check_intensity_interpolation(cls, intensity):
"""
Check whether the intensity interpolation is bright on left, dark on right
:rtype : bool
:param intensity:
:return:
"""
binary_mask = cls.create_binary_mask(intensity)
parts = np.array_split(binary_mask, 2, axis=1)
mean_left = np.mean(parts[0])
mean_right = np.mean(parts[1])
        return mean_left > 0.8 and mean_right < 0.2
@classmethod
def find_edge(cls, intensity):
"""
Find the edge of the particle
:rtype : pandas.DataFrame
:param intensity:
:return:
"""
mask = cls.create_binary_mask(intensity)
# Take last x coord of left list, first x coord of right list and take y
coords = [(([i for i, l in enumerate(row) if l][-1] + [j for j, r in enumerate(row) if not r][0]) / 2.0, y) for
y, row in enumerate(mask) if True in row and False in row]
coords_df = pandas.DataFrame(columns=['x', 'y'], data=coords)
# Set the index
coords_df = coords_df.set_index('y', drop=False, verify_integrity=False)
# Generate index of all y values of intensity array
index = np.arange(0, intensity.shape[0], 1)
# Reindex with all y values, filling with NaN's
coords_df = coords_df.reindex(index, fill_value=np.nan)
# Try to interpolate missing x values
coords_df = coords_df.interpolate(method='nearest', axis=0).ffill().bfill()
return coords_df
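    # Illustrative note (not from the original source): for an intensity array of
    # shape (63, 21), find_edge returns a DataFrame indexed by ray number 0..62
    # whose 'x' column holds the sub-pixel column of the bright-to-dark transition
    # on each ray (midpoint between the last True and the first False of that
    # row's binary mask), with missing rays filled by interpolation.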
@staticmethod
def remove_outliers(edge_coords):
"""
        Replace outlier x coordinates (deviating more than 20% from the mean) by the mean of the remaining points.
        :param edge_coords: DataFrame with columns ['x', 'y']
        :return: edge_coords with outlier x values replaced
"""
mean = np.mean(edge_coords.x)
comparison = 0.2 * mean
mask_outlier = abs(edge_coords.x - mean) > comparison
mask_no_outlier = abs(edge_coords.x - mean) <= comparison
mean_no_outlier = np.mean(edge_coords[mask_no_outlier].x)
        edge_coords.loc[mask_outlier, 'x'] = mean_no_outlier
return edge_coords
@staticmethod
def spline_coords_to_cartesian(edge_coords, rad_range, x, y, step_x, step_y):
"""
Calculate cartesian coordinates from spline representation coordinates
        :param edge_coords: edge coordinates in the ray (spline) representation
        :param rad_range: (inner, outer) offsets used when sampling the rays
:param x:
:param y:
:param step_x:
:param step_y:
:return:
"""
r_dev = edge_coords.x - abs(rad_range[0])
x_new = (x + r_dev * step_x)
y_new = (y + r_dev * step_y)
data = {'x': list(x_new), 'y': list(y_new)}
coord_new = pandas.DataFrame(data)
return coord_new
@staticmethod
def fit_circle(features):
"""
From x, y points, returns an algebraic fit of a circle
(not optimal least squares, but very fast)
        :param features: DataFrame with 'x' and 'y' columns holding the edge points
:return: returns center, radius and rms deviation from fitted
"""
# Get x,y
x = features.x
y = features.y
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
# calculation of the reduced coordinates
u = x - x_m
v = y - y_m
# linear system defining the center in reduced coordinates (uc, vc):
# Suu * uc + Suv * vc = (Suuu + Suvv)/2
# Suv * uc + Svv * vc = (Suuv + Svvv)/2
s_uv = np.sum(u * v)
s_uu = np.sum(u ** 2)
s_vv = np.sum(v ** 2)
s_uuv = np.sum(u ** 2 * v)
s_uvv = np.sum(u * v ** 2)
s_uuu = np.sum(u ** 3)
s_vvv = np.sum(v ** 3)
# Solving the linear system
a = np.array([[s_uu, s_uv], [s_uv, s_vv]])
b = np.array([s_uuu + s_uvv, s_vvv + s_uuv]) / 2.0
try:
            solution, _, _, _ = np.linalg.lstsq(a, b, rcond=None)
except np.linalg.LinAlgError:
return | pandas.DataFrame(columns=['r', 'y', 'x', 'dev']) | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""
Binance API wrapper over Pandas lib.
"""
import inspect
import os
import sys
import time as tm
import warnings
from collections.abc import Iterable
from functools import partial
import ccxt
import numpy as np
import pandas as pd
import requests as req
from ccxt.base import errors as apierr
from decorator import decorator
from panance.utils import cnum, is_empty
BASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(BASE_DIR)
pd.options.display.precision = 8
warnings.filterwarnings(action='ignore', category=FutureWarning)
__version__ = '0.1.6'
__author__ = '<NAME>'
__license__ = 'UNLICENSE'
__package__ = 'panance'
__description__ = 'Python 3 Binance API wrapper built over Pandas Library'
__site__ = 'https://github.com/havocesp/panance'
__email__ = '<EMAIL>'
__requirements__ = ['ccxt', 'pandas', 'numpy', 'requests', 'decorator']
__all__ = ['Panance', '__package__', '__version__', '__author__', '__site__',
'__description__', '__email__', '__requirements__', '__license__']
_LIMITS = [5, 10, 20, 50, 100, 500, 1000]
@decorator
def checker(fn, *args, **kwargs):
"""
Param validator decorator.
:param fn: reference to caller class instance
:param args: method call args
:param kwargs: method class kwargs
:return:
"""
args = [v for v in args]
self = args.pop(0) # type: ccxt.binance
try:
sig = inspect.signature(fn)
except Exception as err:
print(str(err))
return None
param_names = [p for p in sig.parameters.keys()]
detected_params = [f for f in ['currency', 'limit', 'coin', 'symbol', 'symbols'] if f in param_names]
if len(detected_params):
def get_value(_v):
value = kwargs.get(_v)
if _v in param_names and value is None:
arg_position = param_names.index(_v)
value = args[arg_position - 1]
return value
for dp in detected_params:
param_value = get_value(dp)
if param_value is None:
continue
if 'limit' in dp and not str(param_value) in [l for l in map(str, _LIMITS)]:
str_limits = ','.join([l for l in map(str, _LIMITS)])
raise ValueError('Invalid limit: {}\nAccepted values: {}'.format(str(param_value), str_limits))
elif dp in ['currency', 'coin', 'symbol', 'symbols']:
                if 'symbols' not in dp and not isinstance(param_value, (list, tuple, set)):
                    # single currency / symbol params arrive as plain strings; wrap them
                    param_value = [param_value]
symbol_list = [str(s).upper() for s in param_value]
if self.symbols is None or not len(self.symbols):
self.load_markets(True)
                if not all(s in self.currencies or s in self.symbols for s in symbol_list):
                    raise ValueError(
                        'There is an invalid currency or symbol in function params: {}'.format(symbol_list))
return fn(self, *args, **kwargs)
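# Example of what the @checker decorator enforces (illustrative sketch, not part
# of the original source; assumes a configured client):
#
#   api = Panance()
#   api.get_ohlc('BTC/USDT', limit=100)   # ok: 100 is in _LIMITS
#   api.get_ohlc('BTC/USDT', limit=7)     # raises ValueError: invalid limit
#   api.get_ohlc('FOO/BAR', limit=100)    # raises ValueError: invalid symbol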
class Panance(ccxt.binance):
"""
Binance API wrapper over Pandas lib.
"""
usd = 'USDT'
def __init__(self, key=None, secret=None, config=None):
"""
Constructor.
:param str key: user account Binance api key
:param str secret: user account Binance secret key
:param dict config: ccxt.binance configuration dict
"""
if config is None or not isinstance(config, dict):
config = dict(verbose=False, enableRateLimit=True, timeout=15000)
if 'apiKey' not in config or 'secret' not in config:
            if 'BINANCE_KEY' in os.environ and 'BINANCE_SECRET' in os.environ:
config.update(apiKey=os.getenv('BINANCE_KEY'), secret=os.getenv('BINANCE_SECRET'))
elif not is_empty(key) and not is_empty(secret):
config.update(apiKey=key, secret=secret)
super(Panance, self).__init__(config=config)
self.load_time_difference()
self.markets = self.load_markets()
        self.symbols = [k for k in self.markets if k.endswith('/' + self.usd) or k.endswith('/BTC')]
self.currencies = [s for s in {k.split('/')[0] for k in self.symbols}]
self.currencies.append(self.usd)
        self.usd_symbols = [k for k in self.symbols if k.endswith('/' + self.usd)]
self.usd_currencies = [k.split('/')[0] for k in self.usd_symbols]
@checker
def _get_amount(self, coin, amount):
"""
Get coin amount.
Amount should be a float / int or an string value like "max" or a percentage like "10%",
:param coin: the coin where amount will be returned.
:param amount: a float or int with price, "max" word or a percentage like "10%"
:type amount: str pr float or int
:return float: amount as countable item, this is as a float instance
"""
if amount and isinstance(amount, str):
amount = str(amount).lower()
balance = self.get_balances(coin=coin)
            if amount == 'max':
percent = 1.0
elif len(amount) > 1 and amount[-1] in '%':
percent = float(amount[:-1])
percent /= 100.0
else:
raise ValueError('Invalid amount.')
if all((balance is not None, not balance.empty)):
amount = balance['total'] * percent
else:
raise ValueError('Not enough balance for {} currency.'.format(coin))
if amount and isinstance(amount, float):
amount = round(amount, 8)
else:
raise ValueError('Invalid amount.')
return amount
@checker
def _get_price(self, symbol, price):
"""
Get price for a symbol.
If price contains "ask" or "bid", it's value will be retrieve from order book ask or bid entries.
:param symbol: slash sep formatted pair (example: BTC/USDT)
:param price: a float or int with price, "ask" or "bid"
:type price: str pr float or int
:return:
"""
if price is not None:
if str(price).lower() in ['ask', 'bid']:
field = str(price).lower()
return self.get_depth(symbol, limit=5)[field][0]
            elif isinstance(price, (int, float)):
return round(price, 8)
else:
raise ValueError('Invalid price')
else:
raise ValueError('Invalid price')
@checker
def _get_since(self, timeframe='15m', limit=100):
"""
Return number of seconds resulting by doing:
>>> self.parse_timeframe(timeframe) * limit
:param str timeframe: accepted values: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 12h, 1d
:param int limit: limit of timeframes
:return int: number of seconds for limit and timeframe
"""
timeframe_mills = self.parse_timeframe(timeframe) * 1000.0
return int(ccxt.Exchange.milliseconds() - timeframe_mills * limit)
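    # Worked example (illustrative, not from the original source): with
    # timeframe='15m' and limit=100, parse_timeframe returns 900 seconds, so the
    # method returns "now" minus 900 * 1000 * 100 ms, i.e. a timestamp 1500
    # minutes (25 hours) in the past.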
@checker
def get_tickers(self, symbols=None, market=None):
"""
Get all tickers (use market param to filter result by market).
:param list symbols: list of trade pairs
:param str market: accepted values: BTC, USDT
:return pd.DataFrame: ticker data filtered by market (if set)
"""
        market = str(market).upper() if market and str(market).upper() in ['BTC', self.usd] else None
if market is None and symbols is not None:
symbols = [str(s).upper() for s in symbols if s in self.symbols]
elif market is not None and symbols is None:
symbols = [s for s in self.symbols if s.split('/')[1] in market]
else:
symbols = None
try:
if symbols:
raw = self.fetch_tickers(symbols)
else:
raw = self.fetch_tickers()
except (apierr.RequestTimeout, apierr.DDoSProtection, apierr.InvalidNonce) as err:
print(str(err))
return None
columns = [k for k in [k for k in raw.values()][0].keys()]
        transposed = zip(*[list(v.values()) for v in raw.values()])
dict_data = dict(zip(columns, transposed))
del dict_data['info'], dict_data['average'], dict_data['timestamp'], dict_data['datetime']
df = pd.DataFrame(dict_data).dropna(axis=1)
df = df.round(8).set_index('symbol')
if (df.ask < 10.0).all():
df = df.round(dict(bidVolume=3, askVolume=3, baseVolume=0, percentage=2, quoteVolume=2))
return df.sort_values('quoteVolume', ascending=False)
@checker
def get_ticker(self, symbol):
"""
Get ticker for symbol.
Ticker fields:
ask 0.084969
askVolume 7.997
baseVolume 89046.924
bid 0.08493
bidVolume 2.301
change 0.000385
close 0.084969
datetime 2018-05-17T16:07:50.610Z
high 0.0854
last 0.084969
low 0.08371
open 0.084584
percentage 0.455
previousClose 0.084585
quoteVolume 7538.2366
timestamp 1526573270061
vwap 0.08465466
:param str symbol: slash sep formatted pair (example: BTC/USDT)
:return pd.Series: ticker data for symbol.
"""
try:
raw = self.fetch_ticker(symbol)
except (apierr.RequestTimeout,) as err:
print(str(err))
return None
del raw['info'], raw['symbol'], raw['average']
return pd.DataFrame({symbol: raw})[symbol]
@checker
def get_ohlc(self, symbol, timeframe='5m', limit=100):
"""
Get OHLC data for specific symbol and timeframe.
:param str symbol: a valid slash separated trade pair
:param str timeframe: accepted values: 1m, 5m, 15m, 30m, 1h, 2h, 4h, 12h, 1d
:param int limit: result rows limit
:return pd.DataFrame: OHLC data for specific symbol and timeframe.
"""
cols = ['date', 'open', 'high', 'low', 'close', 'volume']
since = self._get_since(timeframe=timeframe, limit=limit)
try:
data = self.fetch_ohlcv(symbol, timeframe=timeframe, since=since)
except (apierr.RequestTimeout, apierr.InvalidNonce) as err:
print(str(err))
tm.sleep(3)
return None
except (apierr.DDoSProtection,) as err:
print(str(err))
tm.sleep(15)
return None
        ms2datetime = partial(pd.to_datetime, unit='ms')
        date = [ms2datetime(v.pop(0)).round('1s') for v in data]
dt_index = pd.DatetimeIndex(date, name='date', tz='Europe/Madrid')
df = pd.DataFrame(data, columns=cols[1:], index=dt_index)
return df
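    # Example usage (sketch; symbol and values are assumptions for illustration):
    #
    #   api = Panance()
    #   ohlc = api.get_ohlc('ETH/BTC', timeframe='15m', limit=500)
    #   print(ohlc[['open', 'high', 'low', 'close', 'volume']].tail())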
@checker
def get_balances(self, coin=None, detailed=False):
"""
Get balance data.
:param str coin: if set only data for currency "coin" will be returned
:param detailed: if True detailed data will be added to result
:type detailed: bool
:return pd.DataFrame: balance data
"""
try:
raw = self.fetch_balance()
except (apierr.RequestTimeout, apierr.InvalidNonce, apierr.RequestTimeout) as err:
print(str(err))
return None
        for f in ['total', 'used', 'free', 'info']:
            raw.pop(f, None)
df = pd.DataFrame(raw).T.query('total > 0.0').T
result = pd.DataFrame()
if detailed:
symbols = ['BTC/USDT']
if all((coin is not None, str(coin).upper() in self.currencies, str(coin).upper() not in ['BTC', 'USDT'])):
symbols.append('{}/BTC'.format(coin))
else:
for c in df.keys():
if c not in ['BTC', 'USDT']:
symbols.append('{}/BTC'.format(c))
tickers = self.get_tickers(symbols=symbols)
if tickers is not None:
tickers = tickers.T
else:
print(' - [ERROR] Server return None for ticker data.')
sys.exit(1)
btc_usdt_last = tickers['BTC/USDT']['last']
for s in symbols:
c, b = s.split('/')
c_balance = df[c]
coin_total = c_balance['total']
if c in ['USDT', 'BTC']:
c_balance['total_{}'.format(c.lower())] = coin_total
if 'USDT' in c:
c_balance['total_btc'] = coin_total / btc_usdt_last
else:
c_balance['total_usdt'] = btc_usdt_last * c_balance['total_btc']
else:
ticker = tickers['{}/BTC'.format(c)]
c_balance['total_btc'] = coin_total * ticker['last']
c_balance['total_usdt'] = c_balance['total_btc'] * btc_usdt_last
result = result.append(c_balance)
else:
result = df
if all((coin is not None, str(coin).upper() in self.currencies, str(coin).upper() in result.T)):
result = result.T[str(coin).upper()]
return result.fillna(0.0)
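    # Example usage (sketch; the currency is chosen for illustration only):
    #
    #   api = Panance()
    #   api.get_balances()                           # all non-zero balances
    #   api.get_balances(coin='ETH', detailed=True)  # adds total_btc / total_usdt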
@checker
def get_aggregated_trades(self, symbol, from_id=None, start=None, end=None, limit=500):
"""
Get aggregated trades for a symbol.
:param str symbol: trade pair
:param int from_id: get trades from specific id
:param int start: unix datetime starting date
:param int end: unix datetime ending date
:param int limit: row limits, max. 500 (default 500)
:return pd.DataFrame: aggregated trades as a Pandas DataFrame
"""
url = 'https://api.binance.com/api/v1/aggTrades?symbol={}'.format(symbol.replace('/', '').upper())
if from_id and isinstance(from_id, int):
url += '&fromId={:d}'.format(from_id)
else:
if start and isinstance(start, (int, float)):
start = int(start)
url += '&startTime={:d}'.format(start)
if end and isinstance(end, (int, float)):
end = int(end)
                url += '&endTime={:d}'.format(end)
if limit != 500:
url += '&limit={:d}'.format(limit)
try:
response = req.get(url)
except (req.RequestException,) as err:
print(str(err))
return None
if response.ok:
raw = response.json()
cols = ['price', 'amount', 'first_id', 'last_id', 'timestamp']
df = | pd.DataFrame([[r['p'], r['q'], r['f'], r['l'], r['T']] for r in raw], columns=cols) | pandas.DataFrame |
"""
This module contains functions for preparing data that was extracted from the FPLManagerBase API for the calculations to follow.
"""
import datetime as dt
import numpy as np
import pandas as pd
from typing import Dict
from .common import Context, POSITION_BY_TYPE, STATS_TYPES
import collections
# Define type aliases
DF = pd.DataFrame
S = pd.Series
def get_next_gw_name(next_gw: int) -> str:
if next_gw == 1:
return 'Next GW'
return f'Next {next_gw} GWs'
def get_next_gw_counts(ctx: Context) -> Dict[str, int]:
return collections.OrderedDict([(get_next_gw_name(gw), gw) for gw in range(1, ctx.total_gws - ctx.next_gw + 2)])
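# Worked example (illustrative, not from the original source): with
# ctx.total_gws == 38 and ctx.next_gw == 36, get_next_gw_counts returns
# OrderedDict([('Next GW', 1), ('Next 2 GWs', 2), ('Next 3 GWs', 3)]).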
def get_news(row: S):
"""Derives the text for the News column."""
if pd.isnull(row['News']) or row['News'] == '':
return None
    date_part = '' if (pd.isnull(row['News Date']) or row['News Date'] == 'None') else ' (' + dt.datetime.strftime(row['News Date'], '%d %b %Y') + ')'
return str(row['News']) + date_part
def prepare_players(players_raw: pd.DataFrame, ctx: Context) -> pd.DataFrame:
return (players_raw
.pipe(ctx.dd.remap, data_set='player')
.pipe(ctx.dd.strip_cols, data_set='player')
.assign(**{'ICT Index': lambda df: | pd.to_numeric(df['ICT Index']) | pandas.to_numeric |
from multiprocessing.sharedctypes import Value
from numpy import isin
import pandas as pd
import os, json, re, tempfile, logging, typing
from typing import Tuple
from jsonschema import Draft4Validator, ValidationError
from .. import db
from ..models import RawMetadataModel
from ..metadata.metadata_util import check_for_projects_in_metadata_db
from ..metadata.metadata_util import check_sample_and_project_ids_in_metadata_db
EXPERIMENT_TYPE_LOOKUP = \
[{'library_preparation': 'WHOLE GENOME SEQUENCING - SAMPLE', 'library_type': 'WHOLE GENOME',
'library_strategy': 'WGS', 'experiment_type': 'WGS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'WHOLE GENOME SEQUENCING HUMAN - SAMPLE', 'library_type': 'WHOLE GENOME',
'library_strategy': 'WGS', 'experiment_type': 'WGS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'WHOLE GENOME SEQUENCING - BACTERIA', 'library_type': 'WHOLE GENOME',
'library_strategy': 'WGS', 'experiment_type': 'WGS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'WGA', 'library_type': 'WGA',
'library_strategy': 'WGA', 'experiment_type': 'WGA', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'WHOLE EXOME CAPTURE - EXONS - SAMPLE', 'library_type': 'HYBRID CAPTURE - EXOME',
'library_strategy': 'WXS', 'experiment_type': 'WXS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'WHOLE EXOME CAPTURE - EXONS + UTR - SAMPLE', 'library_type': 'HYBRID CAPTURE - EXOME',
'library_strategy': 'WXS', 'experiment_type': 'WXS-UTR', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - RIBOSOME PROFILING - SAMPLE', 'library_type': 'TOTAL RNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'RIBOSOME-PROFILING', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - TOTAL RNA', 'library_type': 'TOTAL RNA',
'library_strategy': 'RNA-SEQ','experiment_type': 'TOTAL-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - MRNA', 'library_type': 'MRNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'POLYA-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - MRNA STRANDED - SAMPLE', 'library_type': 'RNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'POLYA-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - TOTAL RNA WITH RRNA DEPLETION - SAMPLE', 'library_type': 'RNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'TOTAL-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - LOW INPUT WITH RIBODEPLETION', 'library_type': 'MRNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'RIBODEPLETION', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - TOTAL RNA WITH GLOBIN DEPLETION', 'library_type': 'TOTAL RNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'TOTAL-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - MRNA RNA WITH GLOBIN DEPLETION', 'library_type': 'MRNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'POLYA-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': "RNA SEQUENCING - 3' END RNA-SEQ", 'library_type': 'MRNA',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'POLYA-RNA-3P', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': "SINGLE CELL -3' RNASEQ- SAMPLE", 'library_type': "SINGLE CELL-3' RNA",
'library_strategy': 'RNA-SEQ', 'experiment_type': 'TENX-TRANSCRIPTOME-3P', 'library_source': 'TRANSCRIPTOMIC_SINGLE_CELL','biomaterial_type':'UNKNOWN'},
{'library_preparation': "SINGLE CELL -3' RNASEQ- SAMPLE NUCLEI", 'library_type': "SINGLE CELL-3' RNA (NUCLEI)",
'library_strategy': 'RNA-SEQ', 'experiment_type': 'TENX-TRANSCRIPTOME-3P', 'library_source': 'TRANSCRIPTOMIC_SINGLE_CELL','biomaterial_type':'SINGLE_NUCLEI'},
{'library_preparation': "SINGLE CELL -5' RNASEQ- SAMPLE", 'library_type': "SINGLE CELL-5' RNA",
'library_strategy': 'RNA-SEQ', 'experiment_type': 'TENX-TRANSCRIPTOME-5P', 'library_source': 'TRANSCRIPTOMIC_SINGLE_CELL','biomaterial_type':'UNKNOWN'},
{'library_preparation': "SINGLE CELL -5' RNASEQ- SAMPLE NUCLEI", 'library_type': "SINGLE CELL-5' RNA (NUCLEI)",
'library_strategy': 'RNA-SEQ', 'experiment_type': 'TENX-TRANSCRIPTOME-5P', 'library_source': 'TRANSCRIPTOMIC_SINGLE_CELL','biomaterial_type':'SINGLE_NUCLEI'},
{'library_preparation': 'METAGENOMIC PROFILING - 16S RRNA SEQUENCING - SAMPLE', 'library_type': '16S',
'library_strategy': 'RNA-SEQ', 'experiment_type': '16S', 'library_source': 'METAGENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RNA SEQUENCING - SMALL RNA - SAMPLE', 'library_type': 'SMALL RNA',
'library_strategy': 'MIRNA-SEQ', 'experiment_type': 'SMALL-RNA', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'NCRNA-SEQ', 'library_type': 'NCRNA-SEQ',
'library_strategy': 'NCRNA-SEQ', 'experiment_type': 'NCRNA-SEQ', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'FL-CDNA', 'library_type': 'FL-CDNA',
'library_strategy': 'FL-CDNA', 'experiment_type': 'FL-CDNA', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'EST', 'library_type': 'EST',
'library_strategy': 'EST', 'experiment_type': 'EST', 'library_source': 'TRANSCRIPTOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'HI-C SEQ', 'library_type': 'HI-C SEQ',
'library_strategy': 'HI-C', 'experiment_type': 'HI-C', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'ATAC SEQ', 'library_type': 'ATAC SEQ',
'library_strategy': 'ATAC-SEQ', 'experiment_type': 'ATAC-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'DNASE-SEQ', 'library_type': 'DNASE-SEQ',
'library_strategy': 'DNASE-SEQ', 'experiment_type': 'DNASE-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'WCS', 'library_type': 'WCS',
'library_strategy': 'WCS', 'experiment_type': 'WCS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RAD-SEQ', 'library_type': 'RAD-SEQ',
'library_strategy': 'RAD-SEQ', 'experiment_type': 'RAD-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CLONE', 'library_type': 'CLONE',
'library_strategy': 'CLONE', 'experiment_type': 'CLONE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'POOLCLONE', 'library_type': 'POOLCLONE',
'library_strategy': 'POOLCLONE', 'experiment_type': 'POOLCLONE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'AMPLICON SEQUENCING - ILLUMINA TRUSEQ CUSTOM AMPLICON', 'library_type': 'AMPLICON SEQ',
'library_strategy': 'AMPLICON', 'experiment_type': 'AMPLICON', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CLONEEND', 'library_type': 'CLONEEND',
'library_strategy': 'CLONEEND', 'experiment_type': 'CLONEEND', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'FINISHING', 'library_type': 'FINISHING',
'library_strategy': 'FINISHING', 'experiment_type': 'FINISHING', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - SAMPLE', 'library_type': 'CHIP SEQ',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'CHIP-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - INPUT', 'library_type': 'CHIP SEQ - INPUT',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'CHIP-INPUT', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - TF', 'library_type': 'CHIP SEQ - TF',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'TF', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - BROAD PEAK', 'library_type': 'CHIP SEQ - BROAD PEAK',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'HISTONE-BROAD', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - NARROW PEAK', 'library_type': 'CHIP SEQ - NARROW PEAK',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'HISTONE-NARROW', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'MNASE-SEQ', 'library_type': 'MNASE-SEQ',
'library_strategy': 'MNASE-SEQ', 'experiment_type': 'MNASE-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'DNASE-HYPERSENSITIVITY', 'library_type': 'DNASE-HYPERSENSITIVITY',
'library_strategy': 'DNASE-HYPERSENSITIVITY', 'experiment_type': 'DNASE-HYPERSENSITIVITY', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'METHYLATION PROFILING - RRBS-SEQ - SAMPLE', 'library_type': 'RRBS-SEQ',
'library_strategy': 'BISULFITE-SEQ', 'experiment_type': 'RRBS-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'METHYLATION PROFILING - WHOLE GENOME BISULFITE SEQUENCING - SAMPLE', 'library_type': 'BISULFITE SEQ',
'library_strategy': 'BISULFITE-SEQ', 'experiment_type': 'WGBS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CTS', 'library_type': 'CTS',
'library_strategy': 'CTS', 'experiment_type': 'CTS', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'MRE-SEQ', 'library_type': 'MRE-SEQ',
'library_strategy': 'MRE-SEQ', 'experiment_type': 'MRE-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'METHYLATION PROFILING - MEDIP-SEQ - SAMPLE', 'library_type': 'MEDIP-SEQ',
'library_strategy': 'MEDIP-SEQ', 'experiment_type': 'MEDIP-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'METHYLATION PROFILING - MBD-SEQ - SAMPLE', 'library_type': 'MBD-SEQ',
'library_strategy': 'MBD-SEQ', 'experiment_type': 'MBD-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'TN-SEQ', 'library_type': 'TN-SEQ',
'library_strategy': 'TN-SEQ', 'experiment_type': 'TN-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'VALIDATION', 'library_type': 'VALIDATION',
'library_strategy': 'VALIDATION', 'experiment_type': 'VALIDATION', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'FAIRE-SEQ', 'library_type': 'FAIRE-SEQ',
'library_strategy': 'FAIRE-SEQ', 'experiment_type': 'FAIRE-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'SELEX', 'library_type': 'SELEX',
'library_strategy': 'SELEX', 'experiment_type': 'SELEX', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'RIP-SEQ', 'library_type': 'RIP-SEQ',
'library_strategy': 'RIP-SEQ', 'experiment_type': 'RIP-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIA-PET', 'library_type': 'CHIA-PET',
'library_strategy': 'CHIA-PET', 'experiment_type': 'CHIA-PET', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'SYNTHETIC-LONG-READ', 'library_type': 'SYNTHETIC-LONG-READ',
'library_strategy': 'SYNTHETIC-LONG-READ', 'experiment_type': 'SYNTHETIC-LONG-READ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'TARGETED CAPTURE AGILENT (PROBES PROVIDED BY COLL.) - SAMPLE', 'library_type': 'HYBRID CAPTURE - PANEL',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CUSTOM TARGET CAPTURE: 1 TO 499KB - SAMPLE', 'library_type': 'HYBRID CAPTURE - CUSTOM',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CUSTOM TARGET CAPTURE: 0.5 TO 2.9MB - SAMPLE', 'library_type': 'HYBRID CAPTURE - CUSTOM',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CUSTOM TARGET CAPTURE: 3 TO 5.9MB - SAMPLE', 'library_type': 'HYBRID CAPTURE - CUSTOM',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CUSTOM TARGET CAPTURE: 6 TO 11.9MB - SAMPLE', 'library_type': 'HYBRID CAPTURE - CUSTOM',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CUSTOM TARGET CAPTURE: 12 TO 24MB - SAMPLE', 'library_type': 'HYBRID CAPTURE - CUSTOM',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CUSTOM TARGET CAPTURE - TRUSIGHT CARDIO - SAMPLE', 'library_type': 'HYBRID CAPTURE - PANEL',
'library_strategy': 'TARGETED-CAPTURE', 'experiment_type': 'TARGETED-CAPTURE', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'TETHERED', 'library_type': 'TETHERED',
'library_strategy': 'TETHERED', 'experiment_type': 'TETHERED', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'NOME-SEQ', 'library_type': 'NOME-SEQ',
'library_strategy': 'NOME-SEQ', 'experiment_type': 'NOME-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'OTHER-SPECIFY IN COMMENT BOX', 'library_type': 'OTHER',
'library_strategy': 'UNKNOWN', 'experiment_type': 'UNKNOWN', 'library_source': 'UNKNOWN','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIRP SEQ', 'library_type': 'CHIRP SEQ',
'library_strategy': 'CHIRP SEQ', 'experiment_type': 'CHIRP SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': '4-C SEQ', 'library_type': '4-C SEQ',
'library_strategy': '4-C-SEQ', 'experiment_type': '4-C-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': '5-C SEQ', 'library_type': '5-C SEQ',
'library_strategy': '5-C-SEQ', 'experiment_type': '5-C-SEQ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'METAGENOMICS - OTHER', 'library_type': 'METAGENOMICS - OTHER',
'library_strategy': 'WGS', 'experiment_type': 'METAGENOMIC', 'library_source': 'METAGENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'DROP-SEQ-TRANSCRIPTOME', 'library_type': 'DROP-SEQ-TRANSCRIPTOME',
'library_strategy': 'RNA-SEQ', 'experiment_type': 'DROP-SEQ-TRANSCRIPTOME', 'library_source': 'TRANSCRIPTOMIC SINGLE CELL','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K27ME3', 'library_type': 'CHIP SEQ - H3K27ME3',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K27ME3', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K27AC', 'library_type': 'CHIP SEQ - H3K27AC',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K27AC', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K9ME3', 'library_type': 'CHIP SEQ - H3K9ME3',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K9ME3', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K36ME3', 'library_type': 'CHIP SEQ - H3K36ME3',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K36ME3', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3F3A', 'library_type': 'CHIP SEQ - H3F3A',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3F3A', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K4ME1', 'library_type': 'CHIP SEQ - H3K4ME1',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K4ME1', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K79ME2', 'library_type': 'CHIP SEQ - H3K79ME2',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K79ME2', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K79ME3', 'library_type': 'CHIP SEQ - H3K79ME3',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K79ME3', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K9ME1', 'library_type': 'CHIP SEQ - H3K9ME1',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K9ME1', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K9ME2', 'library_type': 'CHIP SEQ - H3K9ME2',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K9ME2', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H4K20ME1', 'library_type': 'CHIP SEQ - H4K20ME1',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H4K20ME1', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H2AFZ', 'library_type': 'CHIP SEQ - H2AFZ',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H2AFZ', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3AC', 'library_type': 'CHIP SEQ - H3AC',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3AC', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K4ME2', 'library_type': 'CHIP SEQ - H3K4ME2',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K4ME2', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K4ME3', 'library_type': 'CHIP SEQ - H3K4ME3',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K4ME3', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'},
{'library_preparation': 'CHIP SEQUENCING - H3K9AC', 'library_type': 'CHIP SEQ - H3K9AC',
'library_strategy': 'CHIP-SEQ', 'experiment_type': 'H3K9AC', 'library_source': 'GENOMIC','biomaterial_type':'UNKNOWN'}]
def _run_metadata_json_validation(
metadata_file: str,
schema_json: str) -> list:
try:
if not os.path.exists(metadata_file) or \
not os.path.exists(schema_json):
raise IOError("Input file error")
error_list = list()
with open(schema_json,'r') as jf:
schema = json.load(jf)
metadata_validator = Draft4Validator(schema)
metadata_json_fields = list(schema['items']['properties'].keys())
metadata_df = pd.read_csv(metadata_file)
metadata_df.fillna('', inplace=True)
if 'taxon_id' in metadata_df.columns:
metadata_df['taxon_id'] = \
metadata_df['taxon_id'].\
astype(str)
for header_name in metadata_df.columns:
if header_name not in metadata_json_fields:
error_list.append("Unexpected column {0} found")
duplicates = \
metadata_df[metadata_df.duplicated()]
for entry in duplicates.to_dict(orient="records"):
error_list.append(
"Duplicate entry found for sample {0}".\
format(entry.get("sample_igf_id")))
json_data = \
metadata_df.\
to_dict(orient='records')
validation_errors = \
sorted(
metadata_validator.iter_errors(json_data),
key=lambda e: e.path)
for err in validation_errors:
if isinstance(err, str):
error_list.append(err)
else:
if len(err.schema_path) > 2:
error_list.append(
"{0}: {1}".format(err.schema_path[2], err.message))
else:
error_list.append(
"{0}".format(err.message))
return error_list
except Exception as e:
raise ValueError(
"Failed to run json validation, error: {0}".\
format(e))
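# Example usage (sketch; the file paths are assumptions for illustration):
#
#   errors = _run_metadata_json_validation(
#       metadata_file='/tmp/metadata.csv',
#       schema_json=os.path.join(os.path.dirname(__file__), 'metadata_validation.json'))
#   if errors:
#       print('\n'.join(errors))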
def _set_metadata_validation_status(
raw_metadata_id: int,
status: str,
report: str='') -> None:
try:
if status.upper() == 'VALIDATED':
try:
db.session.\
query(RawMetadataModel).\
filter(RawMetadataModel.raw_metadata_id==raw_metadata_id).\
update({
'status': 'VALIDATED',
'report': ''})
db.session.commit()
except:
db.session.rollback()
raise
elif status.upper() == 'FAILED':
try:
db.session.\
query(RawMetadataModel).\
filter(RawMetadataModel.raw_metadata_id==raw_metadata_id).\
update({
'status': 'FAILED',
'report': report})
db.session.commit()
except:
db.session.rollback()
raise
        else:
            try:
                db.session.\
                    query(RawMetadataModel).\
                    filter(RawMetadataModel.raw_metadata_id==raw_metadata_id).\
                    update({
                        'status': 'UNKNOWN'})
                db.session.commit()
            except:
                db.session.rollback()
                raise
except Exception as e:
raise ValueError(
"Failed to set metadata status for id {0}, error: {1}".\
format(raw_metadata_id, e))
def validate_raw_metadata_and_set_db_status(
raw_metadata_id: int,
check_db: bool=True,
schema_json: str=os.path.join(os.path.dirname(__file__), 'metadata_validation.json')) -> str:
try:
error_list = list()
raw_metadata = \
db.session.\
query(RawMetadataModel).\
filter(RawMetadataModel.raw_metadata_id==raw_metadata_id).\
one_or_none()
if raw_metadata is None:
raise ValueError(
"No metadata entry found for id {0}".\
format(raw_metadata_id))
csv_data = raw_metadata.formatted_csv_data
if csv_data is None:
raise ValueError(
"No formatted csv error found for id {0}".\
format(raw_metadata_id))
with tempfile.TemporaryDirectory() as temp_dir:
metadata_file = os.path.join(temp_dir, 'metadata.csv')
with open(metadata_file, 'w') as fp:
fp.write(csv_data)
validation_errors = \
_run_metadata_json_validation(
metadata_file=metadata_file,
schema_json=schema_json)
if len(validation_errors) > 0:
error_list.\
extend(validation_errors)
metadata_df = pd.read_csv(metadata_file)
for entry in metadata_df.to_dict(orient="records"):
                sample_id = entry.get('sample_igf_id')
                library_source = entry.get('library_source')
                library_strategy = entry.get('library_strategy')
                experiment_type = entry.get('experiment_type')
err = \
_validate_metadata_library_type(
sample_id=sample_id,
library_source=library_source,
library_strategy=library_strategy,
experiment_type=experiment_type)
if err is not None:
error_list.append(
"Metadata error: {0}, {1}".\
format(sample_id, err))
if check_db:
existing_metadata_errors = \
compare_metadata_sample_with_db(
metadata_file=metadata_file)
if len(existing_metadata_errors) > 0:
error_list.extend(existing_metadata_errors)
if len(error_list) > 0:
error_list = \
["{0}, {1}".format(i+1, e)
for i,e in enumerate(error_list)]
_set_metadata_validation_status(
raw_metadata_id=raw_metadata_id,
status='FAILED',
report='\n'.join(error_list))
return 'FAILED'
else:
_set_metadata_validation_status(
raw_metadata_id=raw_metadata_id,
report='',
status='VALIDATED')
return 'VALIDATED'
except Exception as e:
raise ValueError(
"Failed to get metadata for id {0}, error: {1}".\
format(raw_metadata_id, e))
def compare_metadata_sample_with_db(
metadata_file: str,
project_column: str='project_igf_id',
sample_column: str='sample_igf_id',
name_column: str='name',
email_column: str='email_id') -> list:
try:
errors = list()
df = pd.read_csv(metadata_file)
project_list = \
df[project_column].\
drop_duplicates().\
values.\
tolist()
sample_projects_df = \
df[[sample_column, project_column, name_column, email_column]].\
drop_duplicates()
sample_project_list = \
sample_projects_df.\
to_dict(orient='records')
sample_project_errors = \
check_sample_and_project_ids_in_metadata_db(
sample_project_list=sample_project_list,
check_missing=False)
if len(sample_project_errors) > 0:
errors.extend(sample_project_errors)
return errors
except Exception as e:
raise ValueError(
"Failed to compare metadata with db, error: {0}".\
format(e))
def _validate_metadata_library_type(
sample_id: str,
library_source: str,
library_strategy: str,
experiment_type: str) -> str:
'''
A staticmethod for validating library metadata information for sample
:param sample_id: Sample name
:param library_source: Library source information
:param library_strategy: Library strategy information
:param experiment_type: Experiment type information
:returns: A error message string or None
'''
try:
error_msg = None
exp_lookup_data = pd.DataFrame(EXPERIMENT_TYPE_LOOKUP)
if library_source == 'GENOMIC':
library_strategy_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='GENOMIC']['library_strategy'].values)
library_strategy_list.append('UNKNOWN')
experiment_type_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='GENOMIC']['experiment_type'].values)
experiment_type_list.append('UNKNOWN')
if library_strategy not in library_strategy_list or \
experiment_type not in experiment_type_list:
error_msg = \
'{0}: library_strategy {1} or experiment_type {2} is not compatible with library_source {3}'.\
format(sample_id,
library_strategy,
experiment_type,
library_source)
elif library_source == 'TRANSCRIPTOMIC':
library_strategy_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='TRANSCRIPTOMIC']['library_strategy'].values)
library_strategy_list.append('UNKNOWN')
experiment_type_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='TRANSCRIPTOMIC']['experiment_type'].values)
experiment_type_list.append('UNKNOWN')
if library_strategy not in library_strategy_list or \
experiment_type not in experiment_type_list:
error_msg = \
'{0}: library_strategy {1} or experiment_type {2} is not compatible with library_source {3}'.\
format(sample_id,
library_strategy,
experiment_type,
library_source)
elif library_source == 'GENOMIC_SINGLE_CELL':
library_strategy_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='GENOMIC_SINGLE_CELL']['library_strategy'].values)
library_strategy_list.append('UNKNOWN')
experiment_type_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='GENOMIC_SINGLE_CELL']['experiment_type'].values)
experiment_type_list.append('UNKNOWN')
if library_strategy not in library_strategy_list or \
experiment_type not in experiment_type_list:
error_msg = \
'{0}: library_strategy {1} or experiment_type {2} is not compatible with library_source {3}'.\
format(sample_id,
library_strategy,
experiment_type,
library_source)
elif library_source == 'TRANSCRIPTOMIC_SINGLE_CELL':
library_strategy_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='TRANSCRIPTOMIC_SINGLE_CELL']['library_strategy'].values)
library_strategy_list.append('UNKNOWN')
experiment_type_list = \
list(exp_lookup_data[exp_lookup_data['library_source']=='TRANSCRIPTOMIC_SINGLE_CELL']['experiment_type'].values)
experiment_type_list.append('UNKNOWN')
if library_strategy not in library_strategy_list or \
experiment_type not in experiment_type_list:
error_msg = \
'{0}: library_strategy {1} or experiment_type {2} is not compatible with library_source {3}'.\
format(sample_id,
library_strategy,
experiment_type,
library_source)
return error_msg
except Exception as e:
raise ValueError(
"Failed to validate library type, error: {0}".\
format(e))
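# Example (illustrative; the sample id is hypothetical): a GENOMIC sample that
# declares an RNA-SEQ strategy fails the compatibility check above:
#
#   err = _validate_metadata_library_type(
#       sample_id='IGF0001', library_source='GENOMIC',
#       library_strategy='RNA-SEQ', experiment_type='WGS')
#   # err -> "IGF0001: library_strategy RNA-SEQ or experiment_type WGS is not
#   #         compatible with library_source GENOMIC"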
def mark_raw_metadata_as_ready(id_list: list) -> None:
try:
try:
db.session.\
query(RawMetadataModel).\
filter(RawMetadataModel.raw_metadata_id.in_(id_list)).\
filter(RawMetadataModel.status=="VALIDATED").\
update({'status': 'READY'}, synchronize_session='fetch')
db.session.commit()
except:
db.session.rollback()
raise
except Exception as e:
raise ValueError("Failed to mark metadata as ready, error: {0}".format(e))
def search_metadata_table_and_get_new_projects(data):
try:
if isinstance(data, bytes):
data = json.loads(data.decode())
if isinstance(data, str):
data = json.loads(data)
if "project_list" not in data or \
not isinstance(data.get('project_list'), list):
raise ValueError("Missing project list")
project_list = data.get('project_list')
existing_projects = \
db.session.\
query(RawMetadataModel.metadata_tag).\
filter(RawMetadataModel.metadata_tag.in_(project_list)).\
all()
existing_projects = [i[0] for i in existing_projects]
new_projects = \
list(
set(project_list).\
difference(set(existing_projects)))
return new_projects
except Exception as e:
raise ValueError(
"Failed to search for new metadata, error: {0}".format(e))
def parse_and_add_new_raw_metadata(data):
try:
if isinstance(data, bytes):
data = json.loads(data.decode())
if isinstance(data, str):
data = json.loads(data)
if not isinstance(data, list):
raise TypeError(
"Expecting a list of metadata dictionary, got: {0}".\
format(type(data)))
try:
for entry in data:
metadata_tag = entry.get("metadata_tag")
raw_csv_data = entry.get("raw_csv_data")
formatted_csv_data = entry.get("formatted_csv_data")
if metadata_tag is None or \
raw_csv_data is None or \
formatted_csv_data is None:
raise KeyError("Missing metadata info")
exists = \
db.session.\
query(RawMetadataModel).\
filter(RawMetadataModel.metadata_tag==metadata_tag).\
one_or_none()
if isinstance(raw_csv_data, str):
raw_csv_data = json.loads(raw_csv_data)
if isinstance(formatted_csv_data, str):
formatted_csv_data = json.loads(formatted_csv_data)
raw_csv_data = pd.DataFrame(raw_csv_data).to_csv(index=False)
formatted_csv_data = | pd.DataFrame(formatted_csv_data) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
import pandas as pd
import numpy as np
def build_items(master_red: pd.DataFrame, master_ubicaciones: pd.DataFrame, master_demanda, master_producto):
"""
Crea un df de items con 5 columnas donde se especifica tiempo, producto, nodo, tipo, y valor. Estamos
ignorando material importado, ya que toca hacer cambios a la tabla de ubicación para agregar a CGNA_PLANT como
CGNA_PLANT_DISTR
:param master_producto:
:param master_demanda:
:param master_ubicaciones:
:param master_red:
:return:
"""
# The static and dynamic restriction section should actually be built first, since it does not depend on the product.
# Determine the time periods to cover
MONTHS = sorted(master_demanda['fecha'].unique())
# All unique nodes of the network
nodos = pd.concat([master_red.loc[:, 'id_locacion_origen'], master_red.loc[:, 'id_locacion_destino']],
ignore_index=True).unique()
# Build the final DF with the structure defined in the documentation: `tiempo`, `producto`, `nodo`, `tipo`, `valor`
item_df = pd.DataFrame(columns=['tiempo', 'producto', 'nodo', 'tipo', 'valor'])
for t in MONTHS:
# DYNAMIC AND STATIC RESTRICTIONS: extract the dynamic and static restrictions and put them into `item_df` format
nodos_restr = master_ubicaciones.loc[:, ['id_locacion', 'capacidad_din', 'capacidad_est']]
nodos_restr = pd.melt(nodos_restr, id_vars=['id_locacion'], value_vars=['capacidad_din', 'capacidad_est'])
nodos_restr.columns = item_df.columns[-3:]
# Drop the rows where `nodos_restr['valor']` is NaN
nodos_restr = nodos_restr.dropna(subset=['valor'])
# Add time `t` and product `NaN` to these restrictions so they can be concatenated to `item_df`
nodos_restr['tiempo'] = t
nodos_restr['producto'] = np.nan
# PRODUCTS: select the products (families) from the demand master for the month in question
PRODUCTS = master_demanda.loc[master_demanda['fecha'] == t, 'familia'].unique()
for k in PRODUCTS:
# PRODUCTION: look up the product's origin site and its maximum production in the product master.
# There should be only ONE origin
nodos_prod = master_producto.loc[master_producto['familia'] == k, ['familia',
'ubicacion_producto', 'produccion_max']]
# Rename and add the type and time columns
nodos_prod.columns = ['producto', 'nodo', 'valor']
nodos_prod['tipo'] = 'produccion'
nodos_prod['tiempo'] = t
# DEMAND: find every client for product k at time t. Clients are treated as cities
clientes_demanda = master_demanda.loc[(master_demanda['fecha'] == t) & (master_demanda['familia'] == k),
['id_ciudad', 'cantidad']]
# Rename and create the remaining columns so it matches the `item_df` structure
clientes_demanda.columns = ['nodo', 'valor']
clientes_demanda['tiempo'] = t
clientes_demanda['producto'] = k
clientes_demanda['tipo'] = 'demanda'
# FLOW: the remaining nodes are flow nodes. They are the set difference between all nodes of the
# network, the production node and the demand node. Remember to drop CLIENTE from the unique nodes,
# since in ITEMS it is already represented as `clientes_demanda`
nodos_flujo = list(set(nodos) - ({'CLIENTE'} | set(nodos_prod['nodo'])))
nodos_flujo = pd.DataFrame(data={'tiempo': t, 'producto': k, 'nodo': nodos_flujo,
'tipo': 'flujo', 'valor': 0})
# ITEMS: concatenate the per-product sections to `item_df`
item_df = pd.concat([item_df, nodos_prod, nodos_flujo, clientes_demanda], ignore_index=True)
# ITEMS: concatenate the static and dynamic restrictions to `item_df`
item_df = pd.concat([item_df, nodos_restr], ignore_index=True)
return item_df
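# Hedged usage sketch (not part of the original script): the master-table column names
# below are the ones read by build_items above; every value is made up for illustration.
def _demo_build_items():
    red = pd.DataFrame({'id_locacion_origen': ['PLANTA', 'CEDI'],
                        'id_locacion_destino': ['CEDI', 'CLIENTE']})
    ubicaciones = pd.DataFrame({'id_locacion': ['PLANTA', 'CEDI'],
                                'capacidad_din': [100.0, np.nan],
                                'capacidad_est': [50.0, 80.0]})
    demanda = pd.DataFrame({'fecha': [1], 'familia': ['F1'],
                            'id_ciudad': ['BOG'], 'cantidad': [10]})
    producto = pd.DataFrame({'familia': ['F1'],
                             'ubicacion_producto': ['PLANTA'],
                             'produccion_max': [200]})
    # Expected: one production row, one demand row, one flow row (CEDI) and the three
    # non-null capacity rows, all for month 1 / family 'F1'.
    return build_items(red, ubicaciones, demanda, producto)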
def build_activities(master_red, master_tarifario, master_demanda, master_ubicaciones):
"""
Construye la tabla de Actividades que contiene 6 columnas: 'tiempo', 'producto', 'transporte', 'origen', 'destino', 'costo'.
Esos origenes y destinos pueden ser id_locaciones para comunicaciones entre nodos de la infraestructura de Esenttia,
o pueden ser id_ciudades para las entregas a clientes. En esta tabla se evidencian todas las actividades de distribución
y almacenamiento de la red, así como sus costos
:param master_ubicaciones:
:param master_demanda:
:param master_red:
:param master_tarifario:
:return:
"""
# Determine how many months there are for t
MONTHS = sorted(master_demanda['fecha'].unique())
# Open the infrastructure network and keep the relevant columns ['origen', 'destino']
master_red = master_red.loc[:, ['id_locacion_origen', 'id_locacion_destino']]
# Open the tariff master and keep the relevant columns
master_tarifario = master_tarifario[['id_ciudad_origen', 'id_ciudad_destino', 'capacidad', 'costo']]
# Create the final DF with the structure defined in the documentation
actividad_df = pd.DataFrame(columns=['tiempo', 'producto', 'transporte', 'origen', 'destino', 'costo'])
for t in MONTHS:
# PRODUCTS: select the products (families) from the demand master for month `t`
PRODUCTS = master_demanda.loc[master_demanda['fecha'] == t, 'familia'].unique()
for k in PRODUCTS:
# STORAGE: create a storage activity from the nodes whose capacidad_est is non-zero
# in the locations master, i.e. the ones that are not NaN
nodos_alm = master_ubicaciones.loc[~master_ubicaciones['capacidad_est'].isna(),
['id_locacion', 'costo_almacenamiento']]
# To distinguish storage (movement along the time dimension) from the other activities, append 'ALMACENAMIENTO'
nodos_alm['id_locacion'] = nodos_alm['id_locacion'] + '_ALMACENAMIENTO'
# Rename the columns
nodos_alm.columns = ['origen', 'costo']
# Add the destination column (a copy of the origin column) plus the product, time and transport columns
nodos_alm['destino'] = nodos_alm['origen'].copy()
nodos_alm['tiempo'] = t
nodos_alm['producto'] = k
nodos_alm['transporte'] = np.nan
# TRANSPORT: replace CLIENTE in master_red with `id_ciudad` from `master_demanda`. Build a DF of the
# demand, then join it with master_red according to the sites that can supply CLIENTE
clientes_demanda = master_demanda.loc[(master_demanda['fecha'] == t) & (master_demanda['familia'] == k),
'id_ciudad'].to_frame()
clientes_demanda['key'] = 'CLIENTE'
# Split master_red into the arcs whose destination is CLIENTE and the rest
master_red_cliente = master_red.loc[master_red['id_locacion_destino'] == 'CLIENTE', :]
master_red_no_cliente = master_red.loc[~(master_red['id_locacion_destino'] == 'CLIENTE'), :]
# Join `master_red_cliente` with `clientes_demanda`
master_red_cliente = master_red_cliente.merge(clientes_demanda, left_on=['id_locacion_destino'],
right_on=['key'], how='inner')
master_red_cliente = master_red_cliente.drop(columns=['id_locacion_destino', 'key'])
master_red_cliente = master_red_cliente.rename(columns={'id_ciudad': 'id_locacion_destino'})
# Re-attach master_red_cliente to master_red
master_red_clean = pd.concat([master_red_no_cliente, master_red_cliente], ignore_index=True)
# Join between the tariff master and the network master
# An inner join is used because if no vehicle covers the arc, the arc cannot exist in `master_red`.
nodos_trans = master_red_clean.merge(master_tarifario,
left_on=['id_locacion_origen', 'id_locacion_destino'],
right_on=['id_ciudad_origen', 'id_ciudad_destino'], how='inner')
# Rename specific columns so they match the `actividad_df` format
nodos_trans = nodos_trans.rename(columns={'id_locacion_origen': 'origen',
'id_locacion_destino': 'destino',
'capacidad': 'transporte'})
# Keep the relevant columns
nodos_trans = nodos_trans.loc[:, ['transporte', 'origen', 'destino', 'costo']]
# Create the remaining columns to complete the `actividad_df` structure
nodos_trans['tiempo'] = t
nodos_trans['producto'] = k
# ACTIVITIES: concatenate the transport and storage rows to `actividad_df`
actividad_df = pd.concat([actividad_df, nodos_trans, nodos_alm], ignore_index=True)
return actividad_df
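# Illustrative result (added note): for every (month, family) pair, build_activities emits
# one '<id_locacion>_ALMACENAMIENTO' self-loop row per location with a non-null static
# capacity, plus one transport row per (origin, destination) arc that has a matching
# tariff; arcs ending at CLIENTE are expanded to that month's demanded id_ciudad values.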
def matriz_coef(items_df: pd.DataFrame, actividades_df: pd.DataFrame):
"""
v.2
Función optimizada para crear la matriz de coeficientes con base a las actividades (columnas) e ítems (filas)
ingresadas. Explota la velocidad de procesamiento de pd.merge() para realizar el cruce de condiciones por escenario
o flujo.
Retorna un np.array de coeficientes, siendo los indices `items_df`, y las columnas `actividades_df`.
:param items_df: pd.DataFrame con los items del problema
:param actividades_df: pd.DataFrame con las actividades (flujos) del problema
:return: np.array con los coeficientes de entrada y salida de las actividades, en relación a las restricciones
"""
coef_mat = np.zeros((items_df.shape[0], actividades_df.shape[0]))
# Create DF copies to handle mutability and add index columns for items and activities
actividades_df = actividades_df.copy()
items_df = items_df.copy()
actividades_df['idy'] = actividades_df.index
items_df['idx'] = items_df.index
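# Sign convention applied below (added summary): an item node matching the activity origin
# gets +transporte (cond1) and matching the destination gets -transporte (cond2); the
# storage hand-off between consecutive periods uses +1 / -1 (cond3 / cond4); the static and
# dynamic capacity rows use +1 and +transporte respectively (cond5 / cond6).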
# Six groups of conditions, hence six joins. CONDITIONS:
# FLOW INFLOW: being an INNER join, there will be no null values
cond1 = pd.merge(items_df, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'origen'], how='inner')
cond1['valor_mat'] = cond1['transporte'].copy()
# FLOW OUTFLOW
cond2 = pd.merge(items_df, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'destino'], how='inner')
cond2['valor_mat'] = -cond2['transporte'].copy()
# INPUT INTO STORAGE
cond3_items = items_df.copy()
cond3_items.loc[:, 'nodo'] = cond3_items.loc[:, 'nodo'] + '_ALMACENAMIENTO'
cond3 = pd.merge(cond3_items, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'origen'], how='inner')
cond3['valor_mat'] = 1
del cond3_items
# OUTPUT FROM STORAGE
cond4_items = items_df.copy()
cond4_items.loc[:, 'tiempo'] -= 1
cond4_items.loc[:, 'nodo'] = cond4_items.loc[:, 'nodo'] + '_ALMACENAMIENTO'
cond4 = pd.merge(cond4_items, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'destino'], how='inner')
cond4['valor_mat'] = -1
del cond4_items
# MAXIMUM STORAGE (STATIC CAPACITY)
cond5_items = items_df.loc[items_df['tipo'] == 'capacidad_est'].copy()
cond5_items.loc[:, 'nodo'] = cond5_items.loc[:, 'nodo'] + '_ALMACENAMIENTO'
cond5 = pd.merge(cond5_items, actividades_df, left_on=['tiempo', 'nodo'], right_on=['tiempo', 'origen'],
how='inner')
cond5['valor_mat'] = 1
del cond5_items
# MAXIMUM FLOW (DYNAMIC CAPACITY)
cond6_items = items_df.loc[items_df['tipo'] == 'capacidad_din']
cond6 = pd.merge(cond6_items, actividades_df, left_on=['tiempo', 'nodo'], right_on=['tiempo', 'destino'],
how='inner')
cond6['valor_mat'] = cond6['transporte'].copy()
del cond6_items
condiciones = pd.concat([cond1, cond2, cond3, cond4, cond5, cond6], ignore_index=True)
"""
Class Features
Name: driver_data_io_source
Author(s): <NAME> (<EMAIL>)
Date: '20200515'
Version: '1.0.0'
"""
######################################################################################
# Library
import logging
import os
import numpy as np
import pandas as pd
import glob
from copy import deepcopy
from lib_utils_hydro import read_file_hydro_sim, read_file_hydro_obs, parse_file_parts, \
create_file_tag, analyze_obj_hydro, create_obj_hydro
from lib_utils_io import read_obj, write_obj
from lib_utils_system import fill_tags2string, make_folder
from lib_utils_generic import get_dict_value
from lib_info_args import logger_name, time_format_algorithm
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
######################################################################################
# -------------------------------------------------------------------------------------
# Class DriverDischarge
class DriverDischarge:
# -------------------------------------------------------------------------------------
# Initialize class
def __init__(self, time_now, time_run, geo_data_collection, src_dict, anc_dict,
alg_ancillary=None, alg_template_tags=None,
flag_discharge_data_sim='discharge_data_simulated',
flag_discharge_data_obs='discharge_data_observed',
flag_cleaning_anc_discharge_sim=True, flag_cleaning_anc_discharge_obs=True):
self.time_now = time_now
self.time_run = time_run
self.geo_data_collection = geo_data_collection
self.flag_discharge_data_sim = flag_discharge_data_sim
self.flag_discharge_data_obs = flag_discharge_data_obs
self.alg_ancillary = alg_ancillary
self.alg_template_tags = alg_template_tags
self.file_name_tag = 'file_name'
self.folder_name_tag = 'folder_name'
self.parser_tag = 'parser'
self.variables_tag = 'variables'
self.method_data_analysis_tag = 'method_data_analysis'
self.method_data_filling_tag = 'method_data_filling'
self.time_period_tag = 'time_period'
self.time_rounding_tag = 'time_rounding'
self.time_frequency_tag = 'time_frequency'
self.domain_discharge_index_tag = 'discharge_idx'
self.domain_grid_x_tag = 'grid_x_grid'
self.domain_grid_y_tag = 'grid_y_grid'
self.domain_sections_db_tag = 'domain_sections_db'
self.var_name_time = 'time'
self.var_name_discharge = 'discharge'
self.var_name_water_level = 'water_level'
self.var_name_type = 'type'
self.domain_name_list = self.alg_ancillary['domain_name']
self.scenario_type = self.alg_ancillary['scenario_type']
self.scenario_boundary = self.alg_ancillary['scenario_boundary']
domain_section_dict = {}
for domain_name_step in self.domain_name_list:
domain_section_list = get_dict_value(geo_data_collection[domain_name_step], 'name_point_outlet', [])
domain_section_dict[domain_name_step] = domain_section_list
self.domain_section_dict = domain_section_dict
domain_hydro_dict = {}
for domain_name_step in self.domain_name_list:
domain_hydro_list = get_dict_value(geo_data_collection[domain_name_step], 'name_point_obs', [])
domain_hydro_dict[domain_name_step] = domain_hydro_list
self.domain_hydro_dict = domain_hydro_dict
self.folder_name_discharge_sim = src_dict[self.flag_discharge_data_sim][self.folder_name_tag]
self.file_name_discharge_sim = src_dict[self.flag_discharge_data_sim][self.file_name_tag]
self.variables_discharge_sim = src_dict[self.flag_discharge_data_sim][self.variables_tag]
self.method_data_analysis_sim = src_dict[self.flag_discharge_data_sim][self.method_data_analysis_tag]
self.method_data_filling_sim = src_dict[self.flag_discharge_data_sim][self.method_data_filling_tag]
self.time_period_discharge_sim = src_dict[self.flag_discharge_data_sim][self.time_period_tag]
self.time_rounding_discharge_sim = src_dict[self.flag_discharge_data_sim][self.time_rounding_tag]
self.time_frequency_discharge_sim = src_dict[self.flag_discharge_data_sim][self.time_frequency_tag]
if self.parser_tag in list(src_dict[self.flag_discharge_data_sim].keys()):
self.file_parser_sim = src_dict[self.flag_discharge_data_sim][self.parser_tag]
else:
self.file_parser_sim = None
self.folder_name_discharge_obs = src_dict[self.flag_discharge_data_obs][self.folder_name_tag]
self.file_name_discharge_obs = src_dict[self.flag_discharge_data_obs][self.file_name_tag]
self.variables_obs = src_dict[self.flag_discharge_data_obs][self.variables_tag]
self.method_data_analysis_obs = src_dict[self.flag_discharge_data_obs][self.method_data_analysis_tag]
self.method_data_filling_obs = src_dict[self.flag_discharge_data_obs][self.method_data_filling_tag]
self.time_period_discharge_obs = src_dict[self.flag_discharge_data_obs][self.time_period_tag]
self.time_rounding_discharge_obs = src_dict[self.flag_discharge_data_obs][self.time_rounding_tag]
self.time_frequency_discharge_obs = src_dict[self.flag_discharge_data_obs][self.time_frequency_tag]
if self.parser_tag in list(src_dict[self.flag_discharge_data_obs].keys()):
self.file_parser_obs = src_dict[self.flag_discharge_data_obs][self.parser_tag]
else:
self.file_parser_obs = None
self.file_prefix_sim, self.file_sep_sim, self.file_elem_sim = None, None, None
if self.file_parser_sim is not None:
self.file_prefix_sim = self.file_parser_sim['string_prefix']
self.file_sep_sim = self.file_parser_sim['string_sep']
self.file_elem_sim = self.file_parser_sim['string_element']
self.file_prefix_obs, self.file_sep_obs, self.file_elem_obs = None, None, None
if self.file_parser_obs is not None:
self.file_prefix_obs = self.file_parser_obs['string_prefix']
self.file_sep_obs = self.file_parser_obs['string_sep']
self.file_elem_obs = self.file_parser_obs['string_element']
self.format_group = '{:02d}'
if (self.folder_name_discharge_sim is not None) and (self.file_name_discharge_sim is not None):
self.file_path_discharge_sim = self.define_file_discharge(
self.time_run, self.folder_name_discharge_sim, self.file_name_discharge_sim,
file_name_prefix=self.file_prefix_sim, file_name_elem=self.file_elem_sim,
file_name_sep=self.file_sep_sim)
else:
log_stream.error(' ===> Source files of "overland_flow" are not defined ')
raise IOError('Overflow datasets is needed by the application.')
self.file_path_discharge_obs = self.define_file_discharge(
self.time_run, self.folder_name_discharge_obs, self.file_name_discharge_obs,
file_name_prefix=self.file_prefix_obs, file_name_elem=self.file_elem_obs, file_name_sep=self.file_sep_obs,
extra_args={'section_name_obj': self.domain_hydro_dict,
'time_rounding': self.time_rounding_discharge_obs,
'time_frequency': self.time_frequency_discharge_obs,
'time_period': self.time_period_discharge_obs})
self.var_time_dischargesim, self.var_discharge_discharge_sim, \
self.var_wlevel_discharge_sim = self.define_file_variables(self.variables_discharge_sim)
self.var_time_obs, self.var_discharge_obs, self.var_wlevel_obs = self.define_file_variables(self.variables_obs)
self.freq_discharge = 'H'
self.periods_discharge_from = 72
self.periods_discharge_to = 24
self.file_time_discharge = self.define_file_time()
self.folder_name_anc_sim = anc_dict[self.flag_discharge_data_sim][self.folder_name_tag]
self.file_name_anc_sim = anc_dict[self.flag_discharge_data_sim][self.file_name_tag]
self.folder_name_anc_obs = anc_dict[self.flag_discharge_data_obs][self.folder_name_tag]
self.file_name_anc_obs = anc_dict[self.flag_discharge_data_obs][self.file_name_tag]
self.file_path_anc_sim = self.define_file_ancillary(
self.time_now, self.folder_name_anc_sim, self.file_name_anc_sim)
self.file_path_anc_obs = self.define_file_ancillary(
self.time_now, self.folder_name_anc_obs, self.file_name_anc_obs)
self.flag_cleaning_anc_discharge_sim = flag_cleaning_anc_discharge_sim
self.flag_cleaning_anc_discharge_obs = flag_cleaning_anc_discharge_obs
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define file variable(s)
def define_file_variables(self, variables_obj):
var_time, var_discharge, var_water_level = None, None, None
for var_key, var_name in variables_obj.items():
if var_key == self.var_name_time:
var_time = var_name
elif var_key == self.var_name_discharge:
var_discharge = var_name
elif var_key == self.var_name_water_level:
var_water_level = var_name
return var_time, var_discharge, var_water_level
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define time period
def define_file_time(self):
time_run = self.time_run
time_day_start = time_run.replace(hour=0)
time_day_end = time_run.replace(hour=23)
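# Resulting window (added note): 72 hourly steps ending at 00:00 of the run day, the 24
# hours of the run day itself, and 24 hourly steps starting at 23:00 of the run day,
# merged into one hourly DatetimeIndex.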
time_period_from = pd.date_range(
end=time_day_start, periods=self.periods_discharge_from, freq=self.freq_discharge)
time_period_day = pd.date_range(
start=time_day_start, end=time_day_end, freq=self.freq_discharge)
time_period_to = pd.date_range(
start=time_day_end, periods=self.periods_discharge_to, freq=self.freq_discharge)
time_period = time_period_from.union(time_period_day).union(time_period_to)
return time_period
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define ancillary filename
def define_file_ancillary(self, time, folder_name_raw, file_name_raw):
alg_template_tags = self.alg_template_tags
file_path_dict = {}
for domain_name in self.domain_name_list:
alg_template_values = {'domain_name': domain_name,
'ancillary_sub_path_time_discharge': time,
'ancillary_datetime_discharge': time}
folder_name_def = fill_tags2string(folder_name_raw, alg_template_tags, alg_template_values)
file_name_def = fill_tags2string(file_name_raw, alg_template_tags, alg_template_values)
file_path_def = os.path.join(folder_name_def, file_name_def)
file_path_dict[domain_name] = file_path_def
return file_path_dict
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define discharge filename
def define_file_discharge(self, time, folder_name_raw, file_name_raw,
file_name_prefix='idro', file_name_suffix=None, file_name_sep='_', file_name_elem=3,
file_sort_descending=True, extra_args=None):
alg_template_tags = self.alg_template_tags
geo_data_collection = self.geo_data_collection
section_name_obj = None
time_period = None
time_rounding = None
time_frequency = None
if extra_args is not None:
if 'section_name_obj' in list(extra_args.keys()):
section_name_obj = extra_args['section_name_obj']
if 'time_period' in list(extra_args.keys()):
time_period = extra_args['time_period']
if 'time_rounding' in list(extra_args.keys()):
time_rounding = extra_args['time_rounding']
if 'time_frequency' in list(extra_args.keys()):
time_frequency = extra_args['time_frequency']
if (time_rounding is not None) and (time_period is not None):
time_range = pd.date_range(end=time, periods=time_period, freq=time_frequency)
time_start = time_range[0].floor(time_rounding)
time_end = time_range[-1]
else:
time_start = time
time_end = time
file_path_dict = {}
for domain_name in self.domain_name_list:
file_path_dict[domain_name] = {}
domain_id_list = get_dict_value(geo_data_collection[domain_name], 'id', []) # id mask
for domain_id in domain_id_list:
section_name_list = None
if section_name_obj is not None:
if domain_name in list(section_name_obj.keys()):
section_name_list = section_name_obj[domain_name]
domain_group = self.format_group.format(int(domain_id))
alg_template_values = {'domain_name': domain_name,
'source_sub_path_time_discharge_sim': time,
'source_datetime_from_discharge_sim': '*',
'source_datetime_to_discharge_sim': time_end,
'source_sub_path_time_discharge_obs': time,
'source_datetime_from_discharge_obs': '*',
'source_datetime_to_discharge_obs': time_end,
'ancillary_sub_path_time_discharge': time,
'ancillary_datetime_discharge': time,
'mask_discharge': '*' + domain_group,
'scenario_discharge': '*'}
if section_name_list is None:
folder_name_def = fill_tags2string(folder_name_raw, alg_template_tags, alg_template_values)
file_name_def = fill_tags2string(file_name_raw, alg_template_tags, alg_template_values)
file_path_def = os.path.join(folder_name_def, file_name_def)
section_path_found = glob.glob(file_path_def)
if file_name_elem is not None:
section_path_obj = []
for section_path_step in section_path_found:
folder_name_step, file_name_step = os.path.split(section_path_step)
if (file_name_prefix is not None) and (file_name_suffix is None):
if file_name_step.startswith(file_name_prefix):
prefix_check = False
file_name_parts = file_name_step.split(file_name_sep)
file_prefix_parts = file_name_prefix.split(file_name_sep)
if file_name_parts.__len__() == file_name_elem:
for prefix_id, prefix_step in enumerate(file_prefix_parts):
if prefix_step == file_name_parts[prefix_id]:
prefix_check = True
else:
prefix_check = False
break
if prefix_check:
section_path_obj.append(section_path_step)
elif (file_name_suffix is not None) and (file_name_prefix is None):
if file_name_step.endswith(file_name_suffix):
section_path_obj.append(section_path_step)
else:
log_stream.error(' ===> Filter using "prefix" and "suffix" is not supported ')
raise NotImplementedError('Case not implemented yet')
else:
section_path_obj = deepcopy(section_path_found)
if not section_path_obj:
log_stream.error(' ===> Discharge simulated file are not available using the following ' +
file_path_def + '. Try to use unfilled template string')
file_name_root = deepcopy(file_name_raw)
for template_key, template_value in alg_template_tags.items():
string_key = '{' + template_key + '}'
file_name_root = file_name_root.replace(string_key, '*')
file_part_start = file_name_root.split('*')[0]
file_part_end = file_name_root.split('*')[-1]
file_part_merge = ''
if file_part_start == file_part_end:
file_part_merge = file_part_start + '*'
elif file_part_start != file_part_end:
file_part_merge = file_part_start + '*' + file_part_end
log_stream.warning(' ===> Discharge simulated file are not available using the following ' +
file_name_def + '. Try to use unfilled template string ' + file_part_merge)
file_path_def = os.path.join(folder_name_def, file_part_merge)
section_path_obj = glob.glob(file_path_def)
section_path_obj.sort(reverse=file_sort_descending)
else:
section_path_obj = {}
for section_name_step in section_name_list:
alg_template_extra = {'section_name': section_name_step}
alg_template_values = {**alg_template_values, **alg_template_extra}
folder_name_def = fill_tags2string(folder_name_raw, alg_template_tags, alg_template_values)
file_name_def = fill_tags2string(file_name_raw, alg_template_tags, alg_template_values)
file_path_def = os.path.join(folder_name_def, file_name_def)
file_path_list = glob.glob(file_path_def)
file_path_list.sort(reverse=file_sort_descending)
section_path_obj[section_name_step] = file_path_list
file_path_dict[domain_name][domain_group] = section_path_obj
return file_path_dict
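# The returned mapping is nested as file_path_dict[domain_name][domain_group]; each leaf
# is either a sorted list of matching file paths or, when section names are supplied via
# extra_args, a dict of such lists keyed by section name.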
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to wrap method(s)
def organize_discharge(self):
if self.scenario_type == 'simulated':
section_collections = self.organize_discharge_sim()
elif self.scenario_type == 'observed':
section_collections = self.organize_discharge_obs()
elif self.scenario_type == 'mixed':
section_collections_sim = self.organize_discharge_sim()
section_collections_obs = self.organize_discharge_obs()
section_collections = self.organize_discharge_mixed(section_collections_obs, section_collections_sim)
else:
log_stream.error(' ===> Scenario type "' + self.scenario_type + '" is not expected')
raise RuntimeError('Scenario type permitted flags are: [observed, simulated, mixed]')
section_collections = self.filter_discharge(section_collections)
return section_collections
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to filter the discharge collections
def filter_discharge(self, section_collections):
time_run = self.time_run
geo_data_collection = self.geo_data_collection
log_stream.info(' ---> Filter discharge datasets [' + time_run.strftime(time_format_algorithm) + '] ... ')
file_time_discharge = self.file_time_discharge
scenario_boundary = self.scenario_boundary
section_collection_filter = {}
for domain_name_step in self.domain_name_list:
log_stream.info(' ----> Domain "' + domain_name_step + '" ... ')
domain_collections = section_collections[domain_name_step]
domain_discharge_index = geo_data_collection[domain_name_step][self.domain_discharge_index_tag]
domain_grid_rows = geo_data_collection[domain_name_step][self.domain_grid_x_tag].shape[0]
domain_grid_cols = geo_data_collection[domain_name_step][self.domain_grid_y_tag].shape[1]
domain_section_db = geo_data_collection[domain_name_step][self.domain_sections_db_tag]
section_workspace_filter, section_workspace_valid = {}, {}
time_first_list, time_last_list, idx_first_list, idx_last_list = [], [], [], []
for section_key, section_data in domain_section_db.items():
section_description = section_data['description']
log_stream.info(' -----> Section "' + section_description + '" ... ')
section_workspace_valid[section_key] = {}
if section_description in list(domain_collections.keys()):
time_series_collections = domain_collections[section_description]
time_first_valid = time_series_collections[self.var_name_discharge].first_valid_index()
idx_first_valid = time_series_collections[self.var_name_discharge].index.get_loc(time_first_valid)
time_last_valid = time_series_collections[self.var_name_discharge].last_valid_index()
idx_last_valid = time_series_collections[self.var_name_discharge].index.get_loc(time_last_valid)
time_first_list.append(time_first_valid)
time_last_list.append(time_last_valid)
idx_first_list.append(idx_first_valid)
idx_last_list.append(idx_last_valid)
time_series_attrs = {'time_first_valid': time_first_valid, 'time_last_valid': time_last_valid,
'idx_first_valid': idx_first_valid, 'idx_last_valid': idx_last_valid}
time_series_collections_attrs = {**time_series_attrs, **time_series_collections.attrs}
time_series_collections.attrs = deepcopy(time_series_collections_attrs)
section_workspace_valid[section_description] = deepcopy(time_series_collections)
else:
section_workspace_valid[section_description] = None
idx_first_select = max(idx_first_list)
idx_last_select = min(idx_last_list)
time_first_select = max(time_first_list)
time_last_select = min(time_last_list)
time_series_filter = pd.DataFrame(index=file_time_discharge)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
# fillna returns a new Series, so assign it back for the replacement to take effect
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinct values per group, e.g.:
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
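# groupby_siteid builds one row per siteid with the ten yearly operation counts (2010-2019),
# the number of distinct active years, their sum and the per-active-year average; the
# average is also copied into the module-level avg_siteid frame.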
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 =pd.merge(df1, df_2012, on='hospid')
df3 =pd.merge(df2, df_2013, on='hospid')
df4 =pd.merge(df3, df_2014, on='hospid')
df5 =pd.merge(df4, df_2015, on='hospid')
df6 =pd.merge(df5, df_2016, on='hospid')
df7 =pd.merge(df6, df_2017, on='hospid')
df8 =pd.merge(df7, df_2018, on='hospid')
df_sum_all_Years =pd.merge(df8, df_2019, on='hospid')
cols = df_sum_all_Years.columns.difference(['hospid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['hospid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years hospid.csv")
print(df_sum_all_Years)
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years hospid.csv")
print("num of hospital with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
return df_sum_all_Years
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
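# Example call (illustrative values): draw_hist(avg_siteid['total_year_avg'], 20,
# 'Average yearly operations per site', 'operations per year', 'sites', 'skyblue')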
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
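# e.g. group_by_count('siteid', 'total_ops') yields one row per siteid with the operation
# counts summed across the five two-year source files.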
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='siteid')
df2 = pd.merge(df1, df2012, on='siteid')
df3 = pd.merge(df2, df2013, on='siteid')
df4 = pd.merge(df3, df2014, on='siteid')
df5 = pd.merge(df4, df2015, on='siteid')
df6 = pd.merge(df5, df2016, on='siteid')
df7 = pd.merge(df6, df2017, on='siteid')
df8 = pd.merge(df7, df2018, on='siteid')
df_sum_all_Years = pd.merge(df8, df2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / df_sum_all_Years['Distinct_years_reop']
df_sum_all_Years.to_csv("sum all years siteid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='siteid')
d2 = pd.merge(d1, df_12, on='siteid')
d3 = pd.merge(d2, df_13, on='siteid')
d4 = pd.merge(d3, df_14, on='siteid')
d5 = pd.merge(d4, df_15, on='siteid')
d6 = pd.merge(d5, df_16, on='siteid')
d7 = pd.merge(d6, df_17, on='siteid')
d8 = pd.merge(d7, df_18, on='siteid')
df_sum_all_Years_total = pd.merge(d8, df_19, on='siteid')
cols = df_sum_all_Years_total.columns.difference(['siteid'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['siteid', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / df_sum_all_Years_total['Distinct_years']
df_sum_all_Years_total.to_csv("First op sum all years siteid.csv")
# df_sum_all_Years.to_csv("sum all years siteid.csv")
# print(df_sum_all_Years)
# print("num of all sites: ", len(df_sum_all_Years))
#
less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
less.to_csv("First op less 10 years siteid.csv")
print("First op num of sites with less years: ", len(less))
x = np.array(less['Distinct_years'])
print(np.unique(x))
temp_first = pd.DataFrame()
temp_first['siteid'] = df_sum_all_Years_total['siteid']
temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
temp_reop = pd.DataFrame()
temp_reop['siteid'] = df_sum_all_Years['siteid']
temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
df20 = pd.merge(avg_siteid, temp_first, on='siteid', how='left')
total_avg_site_id = pd.merge(df20, temp_reop,on='siteid', how='left' )
total_avg_site_id['firstop/total'] = (total_avg_site_id['Year_avg_Firstop']/total_avg_site_id['total_year_avg'])*100
total_avg_site_id['reop/total'] = (total_avg_site_id['Year_avg_reop']/total_avg_site_id['total_year_avg'])*100
total_avg_site_id.to_csv('total_avg_site_id.csv')
# avg_siteid['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
# avg_siteid['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
def groupby_surgid_prcab():
df2010 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='surgid')
df2 = pd.merge(df1, df2012, on='surgid')
df3 = pd.merge(df2, df2013, on='surgid')
df4 = pd.merge(df3, df2014, on='surgid')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 07:59:39 2021
@author: suriyaprakashjambunathan
"""
#Regressors
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.ensemble.forest import ExtraTreesRegressor
from sklearn.ensemble.bagging import BaggingRegressor
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.ensemble.weight_boosting import AdaBoostRegressor
from sklearn.gaussian_process.gpr import GaussianProcessRegressor
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model.bayes import ARDRegression
from sklearn.linear_model.huber import HuberRegressor
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.passive_aggressive import PassiveAggressiveRegressor
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.linear_model.theil_sen import TheilSenRegressor
from sklearn.linear_model.ransac import RANSACRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neighbors.regression import KNeighborsRegressor
from sklearn.neighbors.regression import RadiusNeighborsRegressor
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from sklearn.tree.tree import DecisionTreeRegressor
from sklearn.tree.tree import ExtraTreeRegressor
from sklearn.svm.classes import SVR
from sklearn.linear_model import BayesianRidge
from sklearn.cross_decomposition import CCA
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import ElasticNetCV
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Lars
from sklearn.linear_model import LarsCV
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
from sklearn.linear_model import LassoLars
from sklearn.linear_model import LassoLarsIC
from sklearn.linear_model import LassoLarsCV
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.linear_model import MultiTaskElasticNetCV
from sklearn.linear_model import MultiTaskLasso
from sklearn.linear_model import MultiTaskLassoCV
from sklearn.svm import NuSVR
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.cross_decomposition import PLSCanonical
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import RidgeCV
from sklearn.svm import LinearSVR
# Classifiers
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import ComplementNB
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm.classes import OneClassSVM
from sklearn.neural_network.multilayer_perceptron import MLPClassifier
from sklearn.neighbors.classification import RadiusNeighborsClassifier
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.multioutput import ClassifierChain
from sklearn.multioutput import MultiOutputClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.passive_aggressive import PassiveAggressiveClassifier
from sklearn.gaussian_process.gpc import GaussianProcessClassifier
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn.ensemble.bagging import BaggingClassifier
from sklearn.ensemble.forest import ExtraTreesClassifier
from sklearn.ensemble.forest import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.calibration import CalibratedClassifierCV
from sklearn.naive_bayes import GaussianNB
from sklearn.semi_supervised import LabelPropagation
from sklearn.semi_supervised import LabelSpreading
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import NearestCentroid
from sklearn.svm import NuSVC
from sklearn.linear_model import Perceptron
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.mixture import GaussianMixture
Name_c = ['BaggingClassifier',
'BernoulliNB',
'CalibratedClassifierCV',
'ComplementNB',
'DecisionTreeClassifier',
'DummyClassifier',
'ExtraTreeClassifier',
'ExtraTreesClassifier',
'GaussianNB',
'GaussianProcessClassifier',
'GradientBoostingClassifier',
'HistGradientBoostingClassifier',
'KNeighborsClassifier',
'LabelPropagation',
'LabelSpreading',
'LinearDiscriminantAnalysis',
'LinearSVC',
'LogisticRegression',
'LogisticRegressionCV',
'MLPClassifier',
'MultinomialNB',
'NearestCentroid',
'PassiveAggressiveClassifier',
'Perceptron',
'QuadraticDiscriminantAnalysis',
'RadiusNeighborsClassifier',
'RandomForestClassifier',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SVC']
Name_r = [ "RandomForestRegressor",
"ExtraTreesRegressor",
"BaggingRegressor",
"GradientBoostingRegressor",
"AdaBoostRegressor",
"GaussianProcessRegressor",
"ARDRegression",
"HuberRegressor",
"LinearRegression",
"PassiveAggressiveRegressor",
"SGDRegressor",
"TheilSenRegressor",
"KNeighborsRegressor",
"RadiusNeighborsRegressor",
"MLPRegressor",
"DecisionTreeRegressor",
"ExtraTreeRegressor",
"SVR",
"BayesianRidge",
"CCA",
"ElasticNet",
"ElasticNetCV",
"KernelRidge",
"Lars",
"LarsCV",
"Lasso",
"LassoCV",
"LassoLars",
"LassoLarsIC",
"LassoLarsCV",
"NuSVR",
"OrthogonalMatchingPursuit",
"OrthogonalMatchingPursuitCV",
"PLSCanonical",
"Ridge",
"RidgeCV",
"LinearSVR"]
# Importing the Libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import explained_variance_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.simplefilter(action='ignore')
# Filling the NaN values with the average of the non-missing values
def avgfit(l):
na = pd.isna(l)
arr = []
for i in range(len(l)):
if na[i] == False:
arr.append(l[i])
avg = sum(arr)/len(arr)
fit_arr = []
for i in range(len(l)):
if na[i] == False:
fit_arr.append(l[i])
elif na[i] == True:
fit_arr.append(avg)
return(fit_arr)
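# Quick check of avgfit: avgfit([1.0, None, 3.0]) returns [1.0, 2.0, 3.0],
# since each missing entry is replaced by the mean of the observed values.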
# Weighted Mean Absolute Percentage Error
def mean_absolute_percentage_error(y_true, y_pred):
l = len(y_true)
num = 0
den = 0
for i in range(l):
num = num + (abs(y_pred[i] - y_true[i]))
den = den + y_true[i]
return abs(num/den) * 100
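# Despite its name, this computes the weighted MAPE: absolute errors are summed first and
# divided by the total of the actuals. Example: y_true=[100, 200], y_pred=[110, 190]
# gives (10 + 10) / 300 * 100 ~ 6.67.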
def regressors(Name, X_train,y_train):
regs = []
for i in range(len(Name)):
regressor = globals()[Name[i]]
print(regressor)
Regressor = regressor()
Regressor.fit(X_train, y_train)
regs.append(Regressor)
return(regs)
def classifiers(Name, X_train,y_train):
clfs = []
for i in range(len(Name)):
classifier = globals()[Name[i]]
print(classifier)
Classifier = classifier()
Classifier.fit(X_train, y_train)
clfs.append(Classifier)
return(clfs)
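# Both helpers look each estimator class up by name in globals(), instantiate it with default
# hyperparameters, fit it, and return the fitted models in the same order as Name.
# e.g. regressors(['LinearRegression', 'Ridge'], X_train, y_train) returns the two fitted models
# (assuming those classes have been imported above).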
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = pd.DataFrame(Xi)
#y
bw = avgfit(list(dataset['bandwidth']))
dataset['bandwidth'] = bw
gain =avgfit(list(dataset['gain']))
dataset['gain'] = gain
vswr =avgfit(list(dataset['vswr']))
dataset['vswr'] = vswr
y1 = pd.DataFrame(bw)
y2 = pd.DataFrame(gain)
y3 = pd.DataFrame(vswr)
# Accuracy list
acc_list = []
params = ['bandwidth','gain','vswr']
y = pd.DataFrame()
y['bandwidth'] = bw
y['vswr'] = vswr
y['gain'] = gain
acc_conf = []
max_acc = []
for param in params:
print(param)
# Splitting into Test and Train set
X_train, X_test, y_train, y_test = train_test_split(Xi, y[param], test_size = 0.3, random_state = 0)
y_train = pd.DataFrame(y_train)
y_test = pd.DataFrame(y_test)
#print(name_r)
Regressor = regressors(Name_r,X_train,y_train)
for reg in Regressor :
y_pred = reg.predict(X_test)
wmape = mean_absolute_percentage_error(list(y_test[param]), list(y_pred))
if not np.isnan(wmape):
try:
acc_conf.append([param, reg, wmape[0]])
except:
acc_conf.append([param, reg, wmape])
wmape = pd.DataFrame(acc_conf)
wmape.to_csv('regressors_wmape.csv')
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = pd.DataFrame(Xi)
#y
bw = avgfit(list(dataset['bandwidth']))
dataset['bandwidth'] = bw
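# Discretize bandwidth into 6 ordinal classes (cut points below are the author's choices)
# so the same target can be used for classification.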
for i in range(len(bw)):
if bw[i] < 100:
bw[i] = 'Class 1'
elif bw[i] >= 100 and bw[i] < 115:
bw[i] = 'Class 2'
elif bw[i] >= 115 and bw[i] < 120:
bw[i] = 'Class 3'
elif bw[i] >= 120 and bw[i] < 121:
bw[i] = 'Class 4'
elif bw[i] >= 121 and bw[i] < 122:
bw[i] = 'Class 5'
elif bw[i] >= 122 :
bw[i] = 'Class 6'
gain =avgfit(list(dataset['gain']))
dataset['gain'] = gain
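# Discretize gain into 6 ordinal classes using the cut points below.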
for i in range(len(gain)):
if gain[i] < 1.3:
gain[i] = 'Class 1'
elif gain[i] >= 1.3 and gain[i] < 1.5:
gain[i] = 'Class 2'
elif gain[i] >= 1.5 and gain[i] < 2.4:
gain[i] = 'Class 3'
elif gain[i] >= 2.4 and gain[i] < 2.7:
gain[i] = 'Class 4'
elif gain[i] >= 2.7 and gain[i] < 2.9:
gain[i] = 'Class 5'
elif gain[i] >= 2.9 and gain[i] < 3.5:
gain[i] = 'Class 6'
vswr =avgfit(list(dataset['vswr']))
dataset['vswr'] = vswr
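# Discretize VSWR into 6 ordinal classes using the cut points below.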
for i in range(len(vswr)):
if vswr[i] >= 1 and vswr[i] < 1.16:
vswr[i] = 'Class 1'
elif vswr[i] >= 1.16 and vswr[i] < 1.32:
vswr[i] = 'Class 2'
elif vswr[i] >= 1.32 and vswr[i] < 1.5:
vswr[i] = 'Class 3'
elif vswr[i] >= 1.5 and vswr[i] < 2:
vswr[i] = 'Class 4'
elif vswr[i] >= 2 and vswr[i] < 4:
vswr[i] = 'Class 5'
elif vswr[i] >= 4:
vswr[i] = 'Class 6'
y1 = pd.DataFrame(bw)
y2 = pd.DataFrame(gain)
y3 = pd.DataFrame(vswr)
# Accuracy list
acc_list = []
params = ['bandwidth','gain','vswr']
y = pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import logging
import warnings
import os
import pandas_datareader as pdr
from collections import Counter
from scipy import stats
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_percentage_error, mean_absolute_error
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
logging.basicConfig(filename='warnings.log',level=logging.WARNING)
logging.captureWarnings(True)
warnings.simplefilter("ignore")
def mape(y,pred):
return None if 0 in y else mean_absolute_percentage_error(y,pred) # average o(1) worst-case o(n)
def rmse(y,pred):
return mean_squared_error(y,pred)**.5
def mae(y,pred):
return mean_absolute_error(y,pred)
def r2(y,pred):
return r2_score(y,pred)
_estimators_ = {'arima', 'mlr', 'mlp', 'gbt', 'xgboost', 'rf', 'prophet', 'hwes', 'elasticnet','svr','knn','combo'}
_metrics_ = {'r2','rmse','mape','mae'}
_determine_best_by_ = {'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','InSampleRMSE','InSampleMAPE','InSampleMAE',
'InSampleR2','ValidationMetricValue','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE',
'LevelTestSetR2',None}
_colors_ = [
'#FFA500','#DC143C','#00FF7F','#808000','#BC8F8F','#A9A9A9',
'#8B008B','#FF1493','#FFDAB9','#20B2AA','#7FFFD4','#A52A2A',
'#DCDCDC','#E6E6FA','#BDB76B','#DEB887'
]*10
class ForecastError(Exception):
class CannotUndiff(Exception):
pass
class NoGrid(Exception):
pass
class PlottingError(Exception):
pass
class Forecaster:
def __init__(self,
y=pd.Series([]),
current_dates=pd.Series([]),
**kwargs):
self.y = y
self.current_dates = current_dates
self.future_dates = pd.Series([])
self.current_xreg = {} # values should be pandas series (to make differencing work more easily)
self.future_xreg = {} # values should be lists (to make iterative forecasting work more easily)
self.history = {}
self.test_length = 1
self.validation_length = 1
self.validation_metric = 'rmse'
self.integration = 0
for key, value in kwargs.items():
setattr(self,key,value)
self.typ_set() # ensures that the passed values are the right types
def __str__(self):
models = self.history.keys()
if len(models) == 0:
first_prt = 'Forecaster object with no models evaluated.'
else:
first_prt = 'Forecaster object with the following models evaluated: {}.'.format(', '.join(models))
whole_thing = first_prt + ' Data starts at {}, ends at {}, loaded to forecast out {} periods, has {} regressors.'.format(self.current_dates.min(),self.current_dates.max(),len(self.future_dates),len(self.current_xreg.keys()))
return whole_thing
def __repr__(self):
if len(self.history.keys()) > 0:
return self.export('model_summaries')
return self.history
def _adder(self):
assert len(self.future_dates) > 0,'before adding regressors, please make sure you have generated future dates by calling generate_future_dates(), set_last_future_date(), or ingest_Xvars_df(use_future_dates=True)'
def _bank_history(self,**kwargs):
call_me = self.call_me
self.history[call_me] = {
'Estimator':self.estimator,
'Xvars':self.Xvars,
'HyperParams':{k:v for k,v in kwargs.items() if k not in ('Xvars','normalizer','auto')},
'Scaler':kwargs['normalizer'] if 'normalizer' in kwargs.keys() else None if self.estimator in ('prophet','combo') else None if hasattr(self,'univariate') else 'minmax',
'Forecast':self.forecast[:],
'FittedVals':self.fitted_values[:],
'Tuned':kwargs['auto'],
'Integration':self.integration,
'TestSetLength':self.test_length,
'TestSetRMSE':self.rmse,
'TestSetMAPE':self.mape,
'TestSetMAE':self.mae,
'TestSetR2':self.r2,
'TestSetPredictions':self.test_set_pred[:],
'TestSetActuals':self.test_set_actuals[:],
'InSampleRMSE':rmse(self.y.values,self.fitted_values),
'InSampleMAPE':mape(self.y.values,self.fitted_values),
'InSampleMAE':mae(self.y.values,self.fitted_values),
'InSampleR2':r2(self.y.values,self.fitted_values),
}
if kwargs['auto']:
self.history[call_me]['ValidationSetLength'] = self.validation_length
self.history[call_me]['ValidationMetric'] = self.validation_metric
self.history[call_me]['ValidationMetricValue'] = self.validation_metric_value
for attr in ('univariate','first_obs','first_dates','grid_evaluated','models'):
if hasattr(self,attr):
self.history[call_me][attr] = getattr(self,attr)
if self.integration > 0:
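            # The series was modelled in differences; integrate the forecast and the test-set
            # predictions back to the original level by cumulative summation, seeded with the
            # stored first observation(s).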
first_obs = self.first_obs.copy()
fcst = self.forecast[::-1]
integration = self.integration
y = self.y.to_list()[::-1]
pred = self.history[call_me]['TestSetPredictions'][::-1]
if integration == 2:
first_ = first_obs[1] - first_obs[0]
y.append(first_)
y = list(np.cumsum(y[::-1]))[::-1]
y.append(first_obs[0])
y = list(np.cumsum(y[::-1]))
fcst.append(y[-1])
fcst = list(np.cumsum(fcst[::-1]))[1:]
pred.append(y[-(len(pred) - 1)])
pred = list(np.cumsum(pred[::-1]))[1:]
if integration == 2:
fcst.reverse()
fcst.append(self.y.values[-2] + self.y.values[-1])
fcst = list(np.cumsum(fcst[::-1]))[1:]
pred.reverse()
pred.append(self.y.values[-(len(pred) - 2)] + self.y.values[-(len(pred) - 1)])
pred = list(np.cumsum(pred[::-1]))[1:]
self.history[call_me]['LevelForecast'] = fcst[:]
self.history[call_me]['LevelY'] = y[integration:]
self.history[call_me]['LevelTestSetPreds'] = pred
self.history[call_me]['LevelTestSetRMSE'] = rmse(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetMAPE'] = mape(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetMAE'] = mae(y[-len(pred):],pred)
self.history[call_me]['LevelTestSetR2'] = r2(y[-len(pred):],pred)
else: # better to have these attributes populated for all series
self.history[call_me]['LevelForecast'] = self.forecast[:]
self.history[call_me]['LevelY'] = self.y.to_list()
self.history[call_me]['LevelTestSetPreds'] = self.test_set_pred[:]
self.history[call_me]['LevelTestSetRMSE'] = self.rmse
self.history[call_me]['LevelTestSetMAPE'] = self.mape
self.history[call_me]['LevelTestSetMAE'] = self.mae
self.history[call_me]['LevelTestSetR2'] = self.r2
def _set_summary_stats(self):
results_summary = self.regr.summary()
results_as_html = results_summary.tables[1].as_html()
self.summary_stats = pd.read_html(results_as_html, header=0, index_col=0)[0]
def _bank_fi_to_history(self):
call_me = self.call_me
self.history[call_me]['feature_importance'] = self.feature_importance
def _bank_summary_stats_to_history(self):
call_me = self.call_me
self.history[call_me]['summary_stats'] = self.summary_stats
def _parse_normalizer(self,X_train,normalizer):
if normalizer == 'minmax':
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
elif normalizer == 'scale':
from sklearn.preprocessing import Normalizer
scaler = Normalizer()
scaler.fit(X_train)
else:
scaler = None
return scaler
def _train_test_split(self,X,y,test_size):
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=test_size,shuffle=False)
return X_train, X_test, y_train, y_test
def _metrics(self,y,pred):
self.test_set_actuals = list(y)
self.test_set_pred = list(pred)
self.rmse = rmse(y,pred)
self.r2 = r2(y,pred)
self.mae = mae(y,pred)
self.mape = mape(y,pred)
def _tune(self):
metric = getattr(self,getattr(self,'validation_metric'))
for attr in ('r2','rmse','mape','mae','test_set_pred','test_set_actuals'):
delattr(self,attr)
return metric
def _scale(self,scaler,X):
if not scaler is None:
return scaler.transform(X)
else:
return X
def _clear_the_deck(self):
for attr in ('univariate','fitted_values','regr','X','feature_importance','summary_stats','models'):
try:
delattr(self,attr)
except AttributeError:
pass
def _prepare_sklearn(self,tune,Xvars):
if Xvars is None:
Xvars = list(self.current_xreg.keys())
if tune:
y = self.y.to_list()[:-self.test_length]
X = pd.DataFrame({k:v.to_list() for k, v in self.current_xreg.items()}).iloc[:-self.test_length,:]
test_size = self.validation_length
else:
y = self.y.to_list()
X = pd.DataFrame({k:v.to_list() for k, v in self.current_xreg.items()})
test_size = self.test_length
X = X[Xvars]
self.Xvars = Xvars
return Xvars, y, X, test_size
def _forecast_sklearn(self,scaler,regr,X,y,Xvars,future_dates,future_xreg,true_forecast=False):
if true_forecast:
self._clear_the_deck()
X = self._scale(scaler,X)
regr.fit(X,y)
if true_forecast:
self.regr = regr
self.X = X
self.fitted_values = list(regr.predict(X))
if len([x for x in self.current_xreg.keys() if x.startswith('AR')]) > 0:
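            # Iterative multi-step forecasting: predict one step at a time and feed each
            # prediction back into the AR regressors for the next step (observed y values are
            # used while the lag still falls inside the known history).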
fcst = []
for i, _ in enumerate(future_dates):
p = pd.DataFrame({k:[v[i]] for k,v in future_xreg.items() if k in Xvars})
p = self._scale(scaler,p)
fcst.append(regr.predict(p)[0])
if not i == len(future_dates) - 1:
for k, v in future_xreg.items():
if k.startswith('AR'):
ar = int(k[2:])
idx = i + 1 - ar
if idx > -1:
try:
future_xreg[k][i+1] = fcst[idx]
except IndexError:
future_xreg[k].append(fcst[idx])
else:
try:
future_xreg[k][i+1] = self.y.values[idx]
except IndexError:
future_xreg[k].append(self.y.values[idx])
else:
p = pd.DataFrame(future_xreg)
p = self._scale(scaler,p)
fcst = list(regr.predict(p))
return fcst
def _full_sklearn(self,fcster,tune,Xvars,normalizer,**kwargs):
assert len(self.current_xreg.keys()) > 0,f'need at least 1 Xvar to forecast with the {self.estimator} model'
Xvars, y, X, test_size = self._prepare_sklearn(tune,Xvars)
X_train, X_test, y_train, y_test = self._train_test_split(X,y,test_size)
scaler = self._parse_normalizer(X_train,normalizer)
X_train = self._scale(scaler,X_train)
X_test = self._scale(scaler,X_test)
regr = fcster(**kwargs)
regr.fit(X_train,y_train)
pred = self._forecast_sklearn(scaler,regr,X_train,y_train,Xvars,self.current_dates.values[-test_size:], {x:v.values[-test_size:] for x,v in self.current_xreg.items()})
self._metrics(y_test,pred)
if tune:
return self._tune()
else:
return self._forecast_sklearn(scaler,regr,X,y,Xvars,self.future_dates,self.future_xreg.copy(),true_forecast=True)
def _forecast_mlp(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
""" normalizer: {'scale','minmax',None}, default 'minmax'
"""
from sklearn.neural_network import MLPRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_mlr(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.linear_model import LinearRegression as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_xgboost(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from xgboost import XGBRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_gbt(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.ensemble import GradientBoostingRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_rf(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.ensemble import RandomForestRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_elasticnet(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.linear_model import ElasticNet as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_svr(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.svm import SVR as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_knn(self,tune=False,Xvars=None,normalizer='minmax',**kwargs):
from sklearn.neighbors import KNeighborsRegressor as fcster
return self._full_sklearn(fcster,tune,Xvars,normalizer,**kwargs)
def _forecast_hwes(self,tune=False,**kwargs):
from statsmodels.tsa.holtwinters import ExponentialSmoothing as HWES
y = self.y.to_list()
if tune:
y_train = y[:-(self.validation_length + self.test_length)]
y_test = y[-(self.test_length + self.validation_length):-self.test_length]
else:
y_train = y[:-self.test_length]
y_test = y[-self.test_length:]
self.Xvars = None
hwes_train = HWES(y_train,dates=self.current_dates.values[:-self.test_length],freq=self.freq,**kwargs).fit(optimized=True,use_brute=True)
pred = hwes_train.predict(start=len(y_train),end=len(y_train) + len(y_test) - 1)
self._metrics(y_test,pred)
if tune:
return self._tune()
else: # forecast
self._clear_the_deck()
self.univariate = True
self.X = None
regr = HWES(self.y,dates=self.current_dates,freq=self.freq,**kwargs).fit(optimized=True,use_brute=True)
self.fitted_values = list(regr.fittedvalues)
self.regr = regr
self._set_summary_stats()
return list(regr.predict(start=len(y),end=len(y) + len(self.future_dates) - 1))
def _forecast_arima(self,tune=False,Xvars=None,**kwargs):
""" Xvars = 'all' will use all Xvars except any "AR" terms since they are special and incorporated in the model already anyway
"""
from statsmodels.tsa.arima.model import ARIMA
Xvars_orig = Xvars
Xvars = [x for x in self.current_xreg.keys() if not x.startswith('AR')] if Xvars == 'all' else Xvars
Xvars, y, X, test_size = self._prepare_sklearn(tune,Xvars)
if len(self.current_xreg.keys()) > 0:
X_train, X_test, y_train, y_test = self._train_test_split(X,y,test_size)
else:
            y_train = self.y.values[:-test_size]
y_test = self.y.values[-test_size:]
if Xvars_orig is None:
X, X_train, X_test = None, None, None
self.Xvars = None
arima_train = ARIMA(y_train,exog=X_train,dates=self.current_dates.values[:-self.test_length],freq=self.freq,**kwargs).fit()
pred = arima_train.predict(exog=X_test,start=len(y_train),end=len(y_train) + len(y_test) - 1,typ='levels')
self._metrics(y_test,pred)
if tune:
return self._tune()
else:
self._clear_the_deck()
if Xvars_orig is None: self.univariate = True
self.X = X
regr = ARIMA(self.y.values[:],exog=X,dates=self.current_dates,freq=self.freq,**kwargs).fit()
self.fitted_values = list(regr.fittedvalues)
self.regr = regr
self._set_summary_stats()
p = pd.DataFrame({k:v for k,v in self.future_xreg.items() if k in self.Xvars}) if self.Xvars is not None else None
fcst = regr.predict(exog=p,start=len(y),end=len(y) + len(self.future_dates) - 1, typ = 'levels', dynamic = True)
return list(fcst)
def _forecast_prophet(self,tune=False,Xvars=None,cap=None,floor=None,**kwargs):
""" Xvars = 'all' will use all Xvars except any "AR" terms since they are special and incorporated in the model already anyway
"""
from fbprophet import Prophet
X = pd.DataFrame({k:v for k,v in self.current_xreg.items() if not k.startswith('AR')})
p = pd.DataFrame({k:v for k,v in self.future_xreg.items() if not k.startswith('AR')})
Xvars = [x for x in self.current_xreg.keys() if not x.startswith('AR')] if Xvars == 'all' else Xvars if Xvars is not None else []
if cap is not None: X['cap'] = cap
        if floor is not None: X['floor'] = floor
X['y'] = self.y.to_list()
X['ds'] = self.current_dates.to_list()
p['ds'] = self.future_dates.to_list()
model = Prophet(**kwargs)
for x in Xvars:
model.add_regressor(x)
if tune:
X_train = X.iloc[:-(self.test_length + self.validation_length)]
X_test = X.iloc[-(self.test_length + self.validation_length):-self.test_length]
y_test = X['y'].values[-(self.test_length + self.validation_length):-self.test_length]
model.fit(X_train)
pred = model.predict(X_test)
self._metrics(y_test,pred['yhat'].to_list())
return self._tune()
else:
model.fit(X.iloc[:-self.test_length])
pred = model.predict(X.iloc[-self.test_length:])
self._metrics(X['y'].values[-self.test_length:],pred['yhat'].to_list())
self._clear_the_deck()
self.X = X[Xvars]
if len(Xvars) == 0:
self.univariate = True
self.X = None
self.Xvars = Xvars if Xvars != [] else None
regr = Prophet(**kwargs)
regr.fit(X)
self.fitted_values = regr.predict(X)['yhat'].to_list()
self.regr = regr
fcst = regr.predict(p)
return fcst['yhat'].to_list()
def _forecast_combo(self,how='simple',models='all',determine_best_by='ValidationMetricValue',rebalance_weights=.1,weights=None,splice_points=None):
""" how: one of {'simple','weighted','splice'}, default 'simple'
the type of combination
all test lengths must be the same for all combined models
models: 'all', starts with "top_", or list-like, default 'all'
which models to combine
must be at least 2 in length
if using list-like object, elements must match model nicknames specified in call_me when forecasting
        determine_best_by: one of {'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','InSampleRMSE','InSampleMAPE','InSampleMAE','InSampleR2','ValidationMetricValue','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE','LevelTestSetR2',None}, default 'ValidationMetricValue'
            'TestSetRMSE','TestSetMAPE','TestSetMAE','TestSetR2','LevelTestSetRMSE','LevelTestSetMAPE','LevelTestSetMAE','LevelTestSetR2' will probably lead to overfitting (data leakage)
            'InSampleRMSE','InSampleMAPE','InSampleMAE','InSampleR2' probably will lead to overfitting since in-sample includes the test set and overfitted models are weighted more highly
'ValidationMetricValue' is the safest option to avoid overfitting, but only works if all combined models were tuned and the validation metric was the same for all models
rebalance_weights: float, default 0.1
a minmax/maxmin scaler is used to perform the weighted average, but this method means the worst performing model on the test set is always weighted 0
to correct that so that all models have some weight in the final combo, you can rebalance the weights but specifying this parameter
the higher this is, the closer to a simple average the weighted average becomes
must be at least 0 -- 0 means the worst model is not given any weight
weights: list-like or None
only applicable when how='weighted'
overwrites determine_best_by with None and applies those weights, automatically rebalances weights to add to one with a minmax scaler unless they already add to one
if weights already add to one, rebalance_weights is ignored
splice_points: list-like
only applicable when how='splice'
elements in array must be str in yyyy-mm-dd or datetime object
must be exactly one less in length than the number of models
models[0] --> :splice_points[0]
models[-1] --> splice_points[-1]:
"""
determine_best_by = determine_best_by if weights is None else None
models = self._parse_models(models,determine_best_by)
assert len(models) > 1,f'need at least two models to average, got {models}'
fcsts = pd.DataFrame({m:h['Forecast'] for m,h in self.history.items() if m in models})
preds = pd.DataFrame({m:h['TestSetPredictions'] for m,h in self.history.items() if m in models})
fvs = pd.DataFrame({m:h['FittedVals'] for m,h in self.history.items() if m in models})
actuals = self.y.values[-preds.shape[0]:]
if how == 'weighted':
scale = True
if weights is None:
weights = pd.DataFrame({m:[h[determine_best_by]] for m,h in self.history.items() if m in models}) # always use r2 since higher is better (could use maxmin scale for other metrics?)
else:
assert len(weights) == len(models),'must pass as many weights as models'
assert not isinstance(weights,str),f'weights argument not recognized: {weights}'
weights = pd.DataFrame(zip(models,weights)).set_index(0).transpose()
if weights.sum(axis=1).values[0] == 1:
scale = False
rebalance_weights=0
try:
assert rebalance_weights >= 0,'when using a weighted average, rebalance_weights must be numeric and at least 0 in value'
if scale:
                    if ((determine_best_by is not None and determine_best_by.endswith('R2')) | ((determine_best_by == 'ValidationMetricValue') & (self.validation_metric.upper() == 'R2')) | (weights is not None)):
weights = (weights - weights.min(axis=1).values[0])/(weights.max(axis=1).values[0] - weights.min(axis=1).values[0]) # minmax scaler
else:
weights = (weights - weights.max(axis=1).values[0])/(weights.min(axis=1).values[0] - weights.max(axis=1).values[0]) # maxmin scaler
weights+=rebalance_weights # by default, add .1 to every value here so that every model gets some weight instead of 0 for the worst one
weights = weights/weights.sum(axis=1).values[0]
pred = (preds * weights.values[0]).sum(axis=1).to_list()
fv = (fvs * weights.values[0]).sum(axis=1).to_list()
fcst = (fcsts * weights.values[0]).sum(axis=1).to_list()
except ZeroDivisionError:
how = 'simple' # all models have the same test set metric value so it's a simple average (never seen this, but jic)
if how in ('simple','splice'):
pred = preds.mean(axis=1).to_list()
fv = fvs.mean(axis=1).to_list()
if how == 'simple':
fcst = fcsts.mean(axis=1).to_list()
elif how == 'splice':
assert len(models) == len(splice_points) + 1,'must have exactly 1 more model passed to models as splice points passed to splice_points'
splice_points = pd.to_datetime(sorted(splice_points)).to_list()
future_dates = self.future_dates.to_list()
assert np.array([p in future_dates for p in splice_points]).all(), 'all elements in splice_points must be datetime objects or str in yyyy-mm-dd format and must be in future_dates attribute'
fcst = [None]*len(future_dates)
start = 0
for i, _ in enumerate(splice_points):
end = [idx for idx,v in enumerate(future_dates) if v == splice_points[i]][0]
fcst[start:end] = fcsts[models[i]].values[start:end]
start = end
fcst[start:] = fcsts[models[-1]].values[start:]
self._metrics(actuals,pred)
self._clear_the_deck()
self.models = models
self.fitted_values = fv
self.Xvars = None
self.X = None
self.regr = None
return fcst
def _parse_models(self,models,determine_best_by):
if determine_best_by is None:
if models[:4] == 'top_':
raise ValueError('cannot use models starts with "top_" unless the determine_best_by or order_by argument is specified and not None')
elif models == 'all':
models = list(self.history.keys())
elif isinstance(models,str):
models = [models]
else:
models = list(models)
if len(models) == 0:
raise ValueError(f'models argument with determine_best_by={determine_best_by} returns no evaluated forecasts')
else:
all_models = [m for m,d in self.history.items() if determine_best_by in d.keys()]
all_models = self.order_fcsts(all_models,determine_best_by)
if models == 'all':
models = all_models[:]
elif models[:4] == 'top_':
models = all_models[:int(models.split('_')[1])]
elif isinstance(models,str):
models = [models]
else:
models = [m for m in all_models if m in models]
return models
def infer_freq(self):
if not hasattr(self,'freq'):
self.freq = pd.infer_freq(self.current_dates)
self.current_dates.freq = self.freq
def fillna_y(self,how='ffill'):
""" how: {'backfill', 'bfill', 'pad', 'ffill', None}
"""
self.y = pd.Series(self.y)
if how != 'midpoint': # only works if there aren't more than 2 na one after another
self.y = self.y.fillna(method=how)
else:
for i, val in enumerate(self.y.values):
                if pd.isnull(val):
self.y.values[i] = (self.y.values[i-1] + self.y.values[i+1]) / 2
def generate_future_dates(self,n):
""" way to specify future forecast dates by specifying a forecast period
"""
self.infer_freq()
self.future_dates = pd.Series(pd.date_range(start=self.current_dates.values[-1],periods=n+1,freq=self.freq).values[1:])
def set_last_future_date(self,date):
""" way to specify future forecast dates by specifying the last desired forecasted date and letting pandas infer the dates in between
"""
self.infer_freq()
if isinstance(date,str):
date = datetime.datetime.strptime(date,'%Y-%m-%d')
self.future_dates = pd.Series(pd.date_range(start=self.current_dates.values[-1],end=date,freq=self.freq).values[1:])
def typ_set(self):
self.y = pd.Series(self.y).dropna().astype(np.float64)
self.current_dates = pd.to_datetime(pd.Series(list(self.current_dates)[-len(self.y):]),infer_datetime_format=True)
assert len(self.y) == len(self.current_dates)
self.future_dates = pd.to_datetime(pd.Series(self.future_dates),infer_datetime_format=True)
for k,v in self.current_xreg.items():
self.current_xreg[k] = pd.Series(list(v)[-len(self.y):]).astype(np.float64)
assert len(self.current_xreg[k]) == len(self.y)
self.future_xreg[k] = [float(x) for x in self.future_xreg[k]]
def diff(self,i=1):
if hasattr(self,'first_obs'):
raise TypeError('series has already been differenced, if you want to difference again, use undiff() first, then diff(2)')
assert i in (1,2),f'only 1st and 2nd order integrations supported for now, got i={i}'
self.first_obs = self.y.values[:i] # np array
self.first_dates = self.current_dates.values[:i] # np array
self.integration = i
for _ in range(i):
self.y = self.y.diff()
for k, v in self.current_xreg.items():
if k.startswith('AR'):
ar = int(k[2:])
for _ in range(i):
self.current_xreg[k] = v.diff()
self.future_xreg[k] = [self.y.values[-ar]]
if hasattr(self,'adf_stationary'):
delattr(self,'adf_stationary')
def add_ar_terms(self,n):
self._adder()
assert isinstance(n,int),f'n must be an int, got {n}'
assert n > 0,f'n must be greater than 0, got {n}'
assert self.integration == 0,"AR terms must be added before differencing (don't worry, they will be differenced too)"
for i in range(1,n+1):
self.current_xreg[f'AR{i}'] = pd.Series(np.roll(self.y,i))
self.future_xreg[f'AR{i}'] = [self.y.values[-i]]
def add_AR_terms(self,N):
""" seasonal AR terms
N: tuple of len 2 (P,m)
"""
self._adder()
assert (len(N) == 2) & (not isinstance(N,str)),f'n must be an array-like of length 2 (P,m), got {N}'
assert self.integration == 0,"AR terms must be added before differencing (don't worry, they will be differenced too)"
for i in range(N[1],N[1]*N[0] + 1,N[1]):
self.current_xreg[f'AR{i}'] = pd.Series(np.roll(self.y,i))
self.future_xreg[f'AR{i}'] = [self.y.values[-i]]
def ingest_Xvars_df(self,df,date_col='Date',drop_first=False,use_future_dates=False):
assert df.shape[0] == len(df[date_col].unique()), 'each date supplied must be unique'
df[date_col] = pd.to_datetime(df[date_col]).to_list()
df = df.loc[df[date_col] >= self.current_dates.values[0]]
df = pd.get_dummies(df,drop_first=drop_first)
current_df = df.loc[df[date_col].isin(self.current_dates)]
future_df = df.loc[df[date_col] > self.current_dates.values[-1]]
assert current_df.shape[0] == len(self.y), 'something went wrong--make sure the dataframe spans the entire daterange as y and is at least one observation to the future and specify a date column in date_col parameter'
if not use_future_dates:
assert future_df.shape[0] >= len(self.future_dates),'the future dates in the dataframe should be at least the same length as the future dates in the Forecaster object. if you desire to use the dataframe to set the future dates for the object, use use_future_dates=True'
else:
self.infer_freq()
self.future_dates = future_df[date_col]
for c in [c for c in future_df if c != date_col]:
self.future_xreg[c] = future_df[c].to_list()[:len(self.future_dates)]
self.current_xreg[c] = current_df[c]
for x,v in self.future_xreg.items():
self.future_xreg[x] = v[:len(self.future_dates)]
if not len(v) == len(self.future_dates):
                warnings.warn(f'warning: {x} is not the correct length in the future_dates attribute and this can cause errors when forecasting. its length is {len(v)} and future_dates length is {len(self.future_dates)}')
def set_test_length(self,n=1):
assert isinstance(n,int),f'n must be an int, got {n}'
self.test_length=n
def set_validation_length(self,n=1):
assert isinstance(n,int),f'n must be an int, got {n}'
        assert n > 0,f'n must be greater than 0, got {n}'
if (self.validation_metric == 'r2') & (n == 1):
raise ValueError('can only set a validation_length of 1 if validation_metric is not r2. try set_validation_metric()')
self.validation_length=n
def adf_test(self,critical_pval=0.05,quiet=True,full_res=False,**kwargs):
res = adfuller(self.y.dropna(),**kwargs)
if not full_res:
if res[1] <= critical_pval:
if not quiet:
print('series appears to be stationary')
self.adf_stationary = True
return True
else:
if not quiet:
print('series might not be stationary')
self.adf_stationary = False
return False
else:
return res
def plot_acf(self,diffy=False,**kwargs):
""" https://www.statsmodels.org/dev/generated/statsmodels.graphics.tsaplots.plot_acf.html
"""
y = self.y.dropna() if not diffy else self.y.diff().dropna()
return plot_acf(y.values,**kwargs)
def plot_pacf(self,diffy=False,**kwargs):
""" https://www.statsmodels.org/dev/generated/statsmodels.graphics.tsaplots.plot_pacf.html
"""
y = self.y.dropna() if not diffy else self.y.diff().dropna()
return plot_pacf(y.values,**kwargs)
def plot_periodogram(self,diffy=False):
""" https://www.statsmodels.org/0.8.0/generated/statsmodels.tsa.stattools.periodogram.html
"""
from scipy.signal import periodogram
y = self.y.dropna() if not diffy else self.y.diff().dropna()
return periodogram(y.values)
def seasonal_decompose(self,diffy=False,**kwargs):
""" https://www.statsmodels.org/stable/generated/statsmodels.tsa.seasonal.seasonal_decompose.html
"""
self.infer_freq()
y = self.y if not diffy else self.y.diff()
X = pd.DataFrame({'y':y.values},index=self.current_dates)
X.index.freq = self.freq
return seasonal_decompose(X.dropna(),**kwargs)
def add_seasonal_regressors(self,*args,raw=True,sincos=False,dummy=False,drop_first=False):
self._adder()
if not (raw|sincos|dummy):
raise ValueError('at least one of raw, sincos, dummy must be True')
for s in args:
try:
if s in ('week','weekofyear'):
_raw = getattr(self.current_dates.dt.isocalendar(),s)
else:
_raw = getattr(self.current_dates.dt,s)
except AttributeError:
raise ValueError(f'cannot set "{s}". see possible values here: https://pandas.pydata.org/docs/reference/api/pandas.Series.dt.year.html')
try:
_raw.astype(int)
except ValueError:
f'{s} must return an int; use dummy = True to get dummies'
if s in ('week','weekofyear'):
_raw_fut = getattr(self.future_dates.dt.isocalendar(),s)
else:
_raw_fut = getattr(self.future_dates.dt,s)
if raw:
self.current_xreg[s] = _raw
self.future_xreg[s] = _raw_fut.to_list()
if sincos:
_cycles = _raw.max() # not the best way to do this but usually good enough
self.current_xreg[f'{s}sin'] = np.sin(np.pi*_raw/(_cycles/2))
self.current_xreg[f'{s}cos'] = np.cos(np.pi*_raw/(_cycles/2))
self.future_xreg[f'{s}sin'] = np.sin(np.pi*_raw_fut/(_cycles/2)).to_list()
self.future_xreg[f'{s}cos'] = np.cos(np.pi*_raw_fut/(_cycles/2)).to_list()
if dummy:
all_dummies = []
stg_df = pd.DataFrame({s:_raw.astype(str)})
stg_df_fut = pd.DataFrame({s:_raw_fut.astype(str)})
for c,v in pd.get_dummies(stg_df,drop_first=drop_first).to_dict(orient='series').items():
self.current_xreg[c] = v
all_dummies.append(c)
for c,v in pd.get_dummies(stg_df_fut,drop_first=drop_first).to_dict(orient='list').items():
if c in all_dummies:
self.future_xreg[c] = v
for c in all_dummies:
if c not in self.future_xreg.keys():
self.future_xreg[c] = [0]*len(self.future_dates)
def add_time_trend(self,called='t'):
self._adder()
self.current_xreg[called] = pd.Series(range(len(self.y)))
self.future_xreg[called] = list(range(len(self.y) + 1,len(self.y) + 1 + len(self.future_dates)))
assert len(self.future_xreg[called]) == len(self.future_dates)
def add_other_regressor(self,called,start,end):
self._adder()
if isinstance(start,str):
start = datetime.datetime.strptime(start,'%Y-%m-%d')
if isinstance(end,str):
end = datetime.datetime.strptime(end,'%Y-%m-%d')
        self.current_xreg[called] = pd.Series([1 if (x >= start) & (x <= end) else 0 for x in self.current_dates])
import calendar
import datetime
import numpy as np
import pandas as pd
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_index_equal)
from numpy.testing import assert_allclose
import pytest
from pvlib.location import Location
from pvlib import solarposition, spa
from conftest import (requires_ephem, needs_pandas_0_17,
requires_spa_c, requires_numba)
# setup times and locations to be tested.
times = pd.date_range(start=datetime.datetime(2014,6,24),
end=datetime.datetime(2014,6,26), freq='15Min')
tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
# In 2003, DST in US was from April 6 to October 26
golden_mst = Location(39.742476, -105.1786, 'MST', 1830.14) # no DST issues possible
golden = Location(39.742476, -105.1786, 'America/Denver', 1830.14) # DST issues possible
times_localized = times.tz_localize(tus.tz)
tol = 5
@pytest.fixture()
def expected_solpos():
return pd.DataFrame({'elevation': 39.872046,
'apparent_zenith': 50.111622,
'azimuth': 194.340241,
'apparent_elevation': 39.888378},
index=['2003-10-17T12:30:30Z'])
@pytest.fixture()
def expected_solpos_multi():
return pd.DataFrame({'elevation': [39.872046, 39.505196],
'apparent_zenith': [50.111622, 50.478260],
'azimuth': [194.340241, 194.311132],
'apparent_elevation': [39.888378, 39.521740]},
index=[['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z']])
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos):
times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
periods=1, freq='D', tz=golden_mst.tz)
ephem_data = solarposition.spa_c(times, golden_mst.latitude,
golden_mst.longitude,
pressure=82000,
temperature=11)
expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ...config import options
from ...serialize import BoolField, AnyField, DataTypeField, Int32Field
from ..utils import parse_index, build_empty_df
from ..operands import DataFrameOperandMixin, DataFrameOperand, ObjectType
from ..merge import DataFrameConcat
class DataFrameReductionOperand(DataFrameOperand):
_axis = AnyField('axis')
_skipna = BoolField('skipna')
_level = AnyField('level')
_min_count = Int32Field('min_count')
_need_count = BoolField('need_count')
_dtype = DataTypeField('dtype')
_combine_size = Int32Field('combine_size')
def __init__(self, axis=None, skipna=None, level=None, min_count=None, need_count=None, dtype=None,
combine_size=None, gpu=None, sparse=None, **kw):
super(DataFrameReductionOperand, self).__init__(_axis=axis, _skipna=skipna, _level=level, _min_count=min_count,
_need_count=need_count, _dtype=dtype,
_combine_size=combine_size, _gpu=gpu, _sparse=sparse, **kw)
@property
def axis(self):
return self._axis
@property
def skipna(self):
return self._skipna
@property
def level(self):
return self._level
@property
def min_count(self):
return self._min_count
@property
def need_count(self):
return self._need_count
@property
def dtype(self):
return self._dtype
@property
def combine_size(self):
return self._combine_size
class DataFrameReductionMixin(DataFrameOperandMixin):
@classmethod
def _tile_one_chunk(cls, op):
df = op.outputs[0]
chk = op.inputs[0].chunks[0]
new_chunk_op = op.copy().reset_key()
chunk = new_chunk_op.new_chunk(op.inputs[0].chunks, shape=df.shape, index=chk.index,
index_value=df.index_value, dtype=df.dtype)
new_op = op.copy()
nsplits = tuple((s,) for s in chunk.shape)
return new_op.new_seriess(op.inputs, df.shape, nsplits=nsplits, chunks=[chunk],
index_value=df.index_value, dtype=df.dtype)
@classmethod
def tile(cls, op):
df = op.outputs[0]
in_df = op.inputs[0]
combine_size = op.combine_size or options.combine_size
if len(in_df.chunks) == 1:
return cls._tile_one_chunk(op)
n_rows, n_cols = in_df.chunk_shape
chunk_dtypes = []
if op.numeric_only and op.axis == 0:
cum_nsplits = np.cumsum((0,) + in_df.nsplits[0])
for i in range(len(cum_nsplits) - 1):
chunk_dtypes.append(build_empty_df(
in_df.dtypes[cum_nsplits[i]: cum_nsplits[i + 1]]).select_dtypes(np.number).dtypes)
# build reduction chunks
reduction_chunks = np.empty(op.inputs[0].chunk_shape, dtype=np.object)
for c in op.inputs[0].chunks:
new_chunk_op = op.copy().reset_key()
if op.min_count > 0:
new_chunk_op._need_count = True
new_chunk_op._object_type = ObjectType.dataframe
if op.axis == 0:
if op.numeric_only:
dtypes = chunk_dtypes[c.index[1]]
else:
dtypes = c.dtypes
reduced_shape = (1, len(dtypes))
index_value = parse_index(pd.RangeIndex(1))
dtypes = c.dtypes
else:
reduced_shape = (c.shape[0], 1)
index_value = c.index_value
dtypes = pd.Series(op.outputs[0].dtype)
reduction_chunks[c.index] = new_chunk_op.new_chunk([c], shape=reduced_shape,
dtypes=dtypes, index_value=index_value)
out_chunks = []
if op.axis is None or op.axis == 0:
for col in range(n_cols):
chunks = [reduction_chunks[i, col] for i in range(n_rows)]
out_chunks.append(cls.tree_reduction(chunks, op, combine_size, col))
elif op.axis == 1:
for row in range(n_rows):
chunks = [reduction_chunks[row, i] for i in range(n_cols)]
out_chunks.append(cls.tree_reduction(chunks, op, combine_size, row))
new_op = op.copy()
nsplits = (tuple(c.shape[0] for c in out_chunks),)
return new_op.new_seriess(op.inputs, df.shape, nsplits=nsplits, chunks=out_chunks,
dtype=df.dtype, index_value=df.index_value)
@classmethod
def tree_reduction(cls, chunks, op, combine_size, idx):
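        # Reduce the per-chunk partial results in a tree: concatenate groups of up to
        # `combine_size` chunks along the reduction axis and re-apply the reduction,
        # repeating until a single chunk remains for this row/column index.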
while len(chunks) > combine_size:
new_chunks = []
for i in range(0, len(chunks), combine_size):
chks = chunks[i: i + combine_size]
                for j, c in enumerate(chks):
c._index = (j,)
concat_op = DataFrameConcat(axis=op.axis, object_type=ObjectType.dataframe)
if op.axis == 0:
concat_index = parse_index(pd.RangeIndex(len(chks)))
concat_dtypes = chks[0].dtypes
concat_shape = (sum([c.shape[0] for c in chks]), chks[0].shape[1])
else:
concat_index = chks[0].index
concat_dtypes = pd.Series([c.dtypes[0] for c in chks])
concat_shape = (chks[0].shape[0], (sum([c.shape[1] for c in chks])))
chk = concat_op.new_chunk(chks, shape=concat_shape, index=(i,),
dtypes=concat_dtypes, index_value=concat_index)
if op.axis == 0:
reduced_shape = (1, chk.shape[1])
index_value = parse_index(pd.RangeIndex(1))
dtypes = chk.dtypes
else:
reduced_shape = (chk.shape[0], 1)
index_value = chk.index_value
dtypes = pd.Series(op.outputs[0].dtype)
new_op = op.copy().reset_key()
new_op._object_type = ObjectType.dataframe
new_chunks.append(new_op.new_chunk([chk], shape=reduced_shape, index=(i,), dtypes=dtypes,
index_value=index_value))
chunks = new_chunks
concat_op = DataFrameConcat(axis=op.axis, object_type=ObjectType.dataframe)
chk = concat_op.new_chunk(chunks, index=(idx,))
empty_df = build_empty_df(chunks[0].dtypes)
reduced_df = getattr(empty_df, getattr(cls, '_func_name'))(axis=op.axis, level=op.level,
numeric_only=op.numeric_only)
reduced_shape = (np.nan,) if op.axis == 1 else reduced_df.shape
new_op = op.copy().reset_key()
return new_op.new_chunk([chk], shape=reduced_shape, index=(idx,), dtype=reduced_df.dtype,
index_value=parse_index(reduced_df.index))
@classmethod
def execute(cls, ctx, op):
inputs = ctx[op.inputs[0].key]
if isinstance(inputs, tuple):
in_df, concat_count = inputs
count = concat_count.sum(axis=op.axis)
else:
in_df = inputs
count = 0
res = getattr(in_df, getattr(cls, '_func_name'))(axis=op.axis, level=op.level,
skipna=op.skipna, numeric_only=op.numeric_only)
if op.object_type == ObjectType.series:
if op.min_count > 0:
res[count < op.min_count] = np.nan
ctx[op.outputs[0].key] = res
else:
ctx[op.outputs[0].key] = res
else:
if op.need_count:
count = in_df.notnull().sum(axis=op.axis)
if op.axis == 0:
if op.min_count > 0:
ctx[op.outputs[0].key] = (pd.DataFrame(res).transpose(), pd.DataFrame(count).transpose())
else:
ctx[op.outputs[0].key] = pd.DataFrame(res).transpose()
else:
if op.min_count > 0:
                    ctx[op.outputs[0].key] = (pd.DataFrame(res), pd.DataFrame(count))
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta,
epsg=4326, fill_val=-9999.0,
rasters_path_out=None, export_hants_only=False):
'''
This function runs the python implementation of the HANTS algorithm. It
takes a folder with geotiffs raster data as an input, creates a netcdf
file, and optionally export the data back to geotiffs.
'''
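    # Example call (hypothetical paths and parameter values):
    #   run_HANTS(r'D:\ndvi_rasters', 'NDVI_{0}.tif', '2016-01-01', '2016-12-31',
    #             latlim=[29.0, 31.0], lonlim=[30.0, 32.0], cellsize=0.01,
    #             nc_path=r'D:\ndvi_hants.nc', nb=365, nf=3, HiLo='Lo', low=-0.3,
    #             high=1.0, fet=0.05, dod=1, delta=0.25)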
create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg, fill_val)
HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val)
#if rasters_path_out:
#export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
return nc_path
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path,
epsg=4326, fill_val=-9999.0):
'''
This function creates a netcdf file from a folder with geotiffs rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
ras_ls = List_Datasets(rasters_path, 'tif')
# Cell code
temp_ll_ls = [pd.np.arange(x, x + lon_n)
for x in range(1, lat_n*lon_n, lon_n)]
code_ls = pd.np.array(temp_ll_ls)
empty_vec = pd.np.empty((lat_n, lon_n))
empty_vec[:] = fill_val
# Create netcdf file
print('Creating netCDF file...')
nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_n)
lon_dim = nc_file.createDimension('longitude', lon_n)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crs_var = nc_file.createVariable('crs', 'i4')
crs_var.grid_mapping_name = 'latitude_longitude'
crs_var.crs_wkt = spa_ref
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
fill_value=fill_val)
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
fill_value=fill_val)
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time'),
fill_value=fill_val)
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
fill_value=fill_val)
outliers_var = nc_file.createVariable('outliers', 'i4',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
outliers_var.long_name = 'outliers'
original_var = nc_file.createVariable('original_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
original_var.long_name = 'original values'
hants_var = nc_file.createVariable('hants_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
hants_var.long_name = 'hants values'
combined_var = nc_file.createVariable('combined_values', 'f8',
('latitude', 'longitude', 'time'),
fill_value=fill_val)
combined_var.long_name = 'combined values'
print('\tVariables created')
# Load data
lat_var[:] = lat_ls
lon_var[:] = lon_ls
time_var[:] = dates_ls
code_var[:] = code_ls
# temp folder
temp_dir = tempfile.mkdtemp()
bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
# Raster
ras = name_format.format(dates_ls[tt])
if ras in ras_ls:
# Resample
ras_resampled = os.path.join(temp_dir, 'r_' + ras)
Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
# Clip
ras_clipped = os.path.join(temp_dir, 'c_' + ras)
Clip(ras_resampled, ras_clipped, bbox)
# Raster to Array
array = Raster_to_Array(ras_resampled,
ll_corner, lon_n, lat_n,
values_type='float32')
# Store values
original_var[:, :, tt] = array
else:
# Store values
original_var[:, :, tt] = empty_vec
# Close file
nc_file.close()
print('NetCDF file created')
# Return
return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+')
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[rows, cols, ztime] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((rows, cols, ztime))
outliers_hants = pd.np.empty((rows, cols, ztime))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
print('Running HANTS...')
for m in range(rows):
for n in range(cols):
print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[m, n, :])
y[pd.np.isnan(y)] = fill_val
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta, fill_val)
values_hants[m, n, :] = yr
outliers_hants[m, n, :] = outliers
counter = counter + 1
nc_file.variables['hants_values'][:] = values_hants
nc_file.variables['outliers'][:] = outliers_hants
nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
values_hants,
original_values)
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta, fill_val=-9999.0):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r')
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
warnings.warn('The point lies outside the extent of the netcd file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = fill_val
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta, fill_val)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
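    # Example (hypothetical values): for a series of ni daily samples over one year,
    #   yr, outliers = HANTS(ni, nb=365, nf=3, y=y, ts=range(ni), HiLo='Lo',
    #                        low=-0.3, high=1.0, fet=0.05, dod=1, delta=0.25, fill_val=-9999.0)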
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in pd.np.arange(ni):
index = pd.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = pd.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
    nout = pd.np.sum(p == 0)
import pandas as pd
import numpy as np
from collections import Counter
test = pd.read_csv('./robust_log_test.csv')
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 11:00:56 2017
@author: 028375
"""
import pandas as pd
import numpy as np
begindate='20171001'
spotdate='20171018'
lastdate='20171017'
path0='F:\月结表\境内TRS\S201710\\'.decode('utf-8')
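# Note on the Chinese identifiers used below (approximate translations, kept as-is because they
# are workbook, sheet, and column names in the source Excel files):
# 月结表 = monthly settlement, 境内TRS = domestic TRS, 股衍境内TRS检验/估值 = equity-derivatives domestic TRS check/valuation,
# 账户状态 = account status (sheet name), 清单编号 = trade/list ID, 客户总预付金 = client total prepaid margin,
# 我方角度合约价值 = contract value from our side, 合约编号 = contract ID, 持仓数量 = position quantity, 最新价 = latest price.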
def TestTemplate(Status,Collateral,Position):
path1=('股衍境内TRS检验'+spotdate+'.xlsx').decode('utf-8')
path2=('股衍境内TRS估值'+lastdate+'.xlsx').decode('utf-8')
LastStatus=pd.read_excel(path0+path2,'账户状态'.decode('utf-8'),encoding="gb2312",keep_default_na=False)
LastStatus=LastStatus[['清单编号'.decode('utf-8'),'客户总预付金'.decode('utf-8'),'我方角度合约价值'.decode('utf-8')]]
LastStatus.columns=[['TradeID','LastCollateral','LastValue']]
Status=pd.merge(Status,LastStatus,how='outer',left_on='清单编号'.decode('utf-8'),right_on='TradeID')
Result=range(len(Status))
for i in range(len(Status)):
tmp1=Position[Position['合约编号'.decode('utf-8')]==Status['清单编号'.decode('utf-8')][Status.index[i]]]['持仓数量'.decode('utf-8')]
tmp2=Position[Position['合约编号'.decode('utf-8')]==Status['清单编号'.decode('utf-8')][Status.index[i]]]['最新价'.decode('utf-8')]
Result[i]=np.sum(tmp1*tmp2)
Result=pd.DataFrame(Result,columns=['Position'],index=Status.index)
Status['Position']=Result['Position']
    wbw = pd.ExcelWriter(path0+path1)
# Ab initio Elasticity and Thermodynamics of Minerals
#
# Version 2.5.0 27/10/2021
#
# Comment the following three lines to produce the documentation
# with readthedocs
# from IPython import get_ipython
# get_ipython().magic('cls')
# get_ipython().magic('reset -sf')
import datetime
import os
import sys
import scipy
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
# from matplotlib import rc
import pandas as pd
import sympy as sym
import parame as pr
from scipy.optimize import curve_fit, fmin, minimize_scalar, minimize
from scipy.interpolate import UnivariateSpline, Rbf
from scipy import integrate
from plot import plot_class
from mineral_data import mineral, load_database, equilib, reaction,\
pressure_react, export, field, import_database, name_list
from mineral_data import ens, cor, py, coe, q, fo, ky, sill, andal, per, sp, \
mao, fmao, stv, cc, arag, jeff, jeff_fe, jeff_fe3p, jeff_feb
import_database()
mpl.rcParams['figure.dpi']= 80
class latex_class():
"""
Setup for the use of LaTeX for axis labels and titles; sets of parameters
for graphics output.
"""
def __init__(self):
self.flag=False
self.dpi=300
self.font_size=14
self.tick_size=12
self.ext='jpg'
mpl.rc('text', usetex=False)
def on(self):
self.flag=True
mpl.rc('text', usetex=True)
def off(self):
self.flag=False
mpl.rc('text', usetex=False)
def set_param(self, dpi=300, fsize=14, tsize=12, ext='jpg'):
"""
Args:
dpi: resolution of the graphics file (default 300)
fsize: size of the labels of the axes in points (default 14)
tsize: size of the ticks in points (default 12)
ext: extension of the graphics file (default 'jpg'); this argument
is only used in those routines where the name of the file is
automatically produced by the program (e.g. check_poly or
check_spline functions). In other cases, the extension is
directly part of the name of the file given as argument to
the function itself, and 'ext' is ignored.
"""
self.dpi=dpi
self.font_size=fsize
self.tick_size=tsize
self.ext=ext
def get_dpi(self):
return self.dpi
def get_fontsize(self):
return self.font_size
def get_ext(self):
return self.ext
def get_tsize(self):
return self.tick_size
class flag:
def __init__(self,value):
self.value=value
self.jwar=0
def on(self):
self.value=True
def off(self):
self.value=False
def inc(self):
self.jwar += 1
def reset(self):
self.jwar=0
class verbose_class():
def __init__(self,value):
self.flag=value
def on(self):
self.flag=True
print("Verbose mode on")
def off(self):
self.flag=False
print("Verbose mode off")
class BM3_error(Exception):
pass
class vol_corr_class:
def __init__(self):
self.flag=False
self.v0_init=None
def on(self):
self.flag=True
def off(self):
self.flag=False
def set_volume(self,vv):
self.v0_init=vv
class data_info():
"""
Stores information about the current settings
"""
def __init__(self):
self.min_static_vol=None
self.max_static_vol=None
self.static_points=None
self.min_freq_vol=None
self.max_freq_vol=None
self.freq_points=None
self.min_select_vol=None
self.max_select_vol=None
self.select_points=None
self.freq_sets=None
self.fit_type='No fit'
self.min_vol_fit=None
self.max_vol_fit=None
self.fit_points=None
self.fit_degree=None
self.fit_smooth=None
self.k0=None
self.kp=None
self.v0=None
self.temp=None
self.k0_static=None
self.kp_static=None
self.v0_static=None
self.popt=None
self.popt_orig=None
self.min_names=name_list.mineral_names
self.title=None
def show(self):
"""
Prints information about the current settings stored in the classes
"""
if self.title !=None:
print(self.title)
print("\nCurrent settings and results\n")
if self.min_static_vol != None:
print("Static data ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_static_vol, self.max_static_vol, self.static_points))
if self.min_freq_vol != None:
print("Frequency volume range ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_freq_vol, self.max_freq_vol, self.freq_points))
if self.min_select_vol != None:
print("Selected freq. sets ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_select_vol, self.max_select_vol, self.select_points))
print("Frequency sets: %s" % str(self.freq_sets))
if self.fit_type != 'No fit':
if self.fit_type=='poly':
print("\nFit of frequencies ** type: %s, degree: %d" \
% (self.fit_type, self.fit_degree))
else:
print("\nFit of frequencies ** type: %s, degree: %d, smooth: %2.1f" \
% (self.fit_type, self.fit_degree, self.fit_smooth))
print(" min, max volumes: %8.4f, %8.4f; points %d" %\
(self.min_vol_fit, self.max_vol_fit, self.fit_points))
else:
print("No fit of frequencies")
if supercell.flag:
print("\n*** This is a computation performed on SUPERCELL data")
print(" (SCELPHONO and QHA keywords in CRYSTAL). Number of cells: %3i" % supercell.number)
if self.k0_static != None:
print("\n*** Static EoS (BM3) ***")
print("K0: %6.2f GPa, Kp: %4.2f, V0: %8.4f A^3" %\
(self.k0_static, self.kp_static, self.v0_static))
if static_range.flag:
print("\n*** Static EoS is from a restricted volume range:")
print("Minimum volume: %8.3f" % static_range.vmin)
print("Maximum volume: %8.3f" % static_range.vmax)
if p_stat.flag:
print("\n*** Static EoS from P(V) data ***")
print("Data points num: %3i" % p_stat.npoints)
print("Volume range: %8.4f, %8.4f (A^3)" % (p_stat.vmin, p_stat.vmax))
print("Pressure range: %5.2f, %5.2f (GPa)" % (p_stat.pmax, p_stat.pmin))
print("EoS -- K0: %6.2f (GPa), Kp: %4.2f, V0: %8.4f (A^3)" % (p_stat.k0,\
p_stat.kp, p_stat.v0))
print("Energy at V0: %12.9e (hartree)" % p_stat.e0)
if self.k0 != None:
print("\n** BM3 EoS from the last computation, at the temperature of %5.2f K **" % self.temp)
print("K0: %6.2f GPa, Kp: %4.2f, V0: %8.4f A^3" %\
(self.k0, self.kp, self.v0))
if not f_fix.flag:
print("Kp not fixed")
else:
print("Kp fixed")
if exclude.ex_mode != []:
uniq=np.unique(exclude.ex_mode)
print("\nZone center excluded modes: %s" % str(uniq))
else:
print("\nNo zone center excluded modes")
if disp.ex_flag:
uniq=np.unique(disp.excluded_list)
print("Off center excluded modes: %s" % str(uniq))
else:
print("No off center excluded modes")
if kieffer.flag==True:
print("\nKieffer model on; frequencies %5.2f %5.2f %5.2f cm^-1" %\
(kieffer.kief_freq_inp[0], kieffer.kief_freq_inp[1], \
kieffer.kief_freq_inp[2]))
else:
print("\nKieffer model off")
if anharm.flag:
print("\nAnharmonic correction for mode(s) N. %s" % str(anharm.mode).strip('[]'))
print("Brillouin flag(s): %s" % str(anharm.brill).strip('[]'))
if disp.flag:
print("\n--------------- Phonon dispersion --------------------")
print("\nDispersion correction activated for the computation of entropy and")
print("specific heat:")
print("Number of frequency sets: %3i" % disp.nset)
if disp.nset > 1:
if disp.fit_type == 0:
print("Polynomial fit of the frequencies; degree: %3i " % disp.fit_degree)
else:
print("Spline fit of the frequencies; degree: %3i, smooth: %3.1f"\
% (disp.fit_degree, disp.fit_type))
print("Number of off-centered modes: %5i" % disp.f_size)
if disp.eos_flag:
print("\nThe phonon dispersion is used for the computation of the bulk modulus")
print("if the bulk_dir or the bulk_modulus_p functions are used, the latter")
print("in connection with the noeos option.")
if disp.fit_vt_flag:
print("The required V,T-fit of the free energy contribution from")
print("the off-centered modes is ready. Fit V,T-powers: %3i, %3i"
% (disp.fit_vt_deg_v, disp.fit_vt_deg_t))
else:
print("The required V,T-fit of the free energy contribution from")
print("the off-centered mode is NOT ready.")
else:
print("\nThe phonon dispersion correction is not used for the computation")
print("of the bulk modulus")
if disp.thermo_vt_flag & (disp.nset > 1):
print("\nVT-phonon dispersion correction to the thermodynamic properties")
elif (not disp.thermo_vt_flag) & (disp.nset > 1):
print("\nT-phonon dispersion correction to the thermodynamic properties")
print("Use disp.thermo_vt_on() to activate the V,T-correction")
print("\n --------------------------------------------------------")
if lo.flag:
out_lo=(lo.mode, lo.split)
df_out=pd.DataFrame(out_lo, index=['Mode', 'Split'])
df_out=df_out.T
df_out['Mode']=np.array([int(x) for x in df_out['Mode']], dtype=object)
print("\nFrequencies corrected for LO-TO splitting.\n")
if verbose.flag:
print(df_out.to_string(index=False))
print("---------------------------------------------")
print("\n**** Volume driver for volume_dir function ****")
print("Delta: %3.1f; degree: %2i; left: %3.1f; right: %3.1f, Kp_fix: %s; t_max: %5.2f"\
% (volume_ctrl.delta, volume_ctrl.degree, volume_ctrl.left, volume_ctrl.right,\
volume_ctrl.kp_fix, volume_ctrl.t_max))
print("EoS shift: %3.1f; Quad_shrink: %2i; T_dump: %3.1f; Dump fact.: %2.1f, T_last %4.1f" % \
(volume_ctrl.shift, volume_ctrl.quad_shrink, volume_ctrl.t_dump, volume_ctrl.dump,\
volume_ctrl.t_last))
print("Upgrade shift: %r" % volume_ctrl.upgrade_shift)
print("\n**** Volume driver for volume_from_F function ****")
print("In addition to the attributes set in the parent volume_control_class:")
print("shift: %3.1f, flag: %r, upgrade_shift: %r" % (volume_F_ctrl.get_shift(), \
volume_F_ctrl.get_flag(), volume_F_ctrl.get_upgrade_status()))
print("\n**** Numerical T-derivatives driver class (delta_ctrl) ****")
if not delta_ctrl.adaptive:
print("Delta: %3.1f" % delta_ctrl.delta)
print("Degree: %3i" % delta_ctrl.degree)
print("N. of points %3i" % delta_ctrl.nump)
else:
print("Adaptive scheme active:")
print("T_min, T_max: %4.1f, %6.1f K" % (delta_ctrl.tmin, delta_ctrl.tmax))
print("Delta_min, Delta_max: %4.1f, %6.1f K" % (delta_ctrl.dmin, delta_ctrl.dmax))
print("Degree: %3i" % delta_ctrl.degree)
print("N. of points %3i" % delta_ctrl.nump)
if verbose.flag:
print("\n--------- Database section ---------")
print("Loaded phases:")
print(self.min_names)
class exclude_class():
"""
Contains the list of modes to be excluded from the
calculation of the Helmholtz free energy.
It can be constructed by using the keyword EXCLUDE
in the input.txt file.
"""
def __init__(self):
self.ex_mode=[]
self.ex_mode_keep=[]
self.flag=False
def __str__(self):
return "Excluded modes:" + str(self.ex_mode)
def add(self,modes):
"""
Args:
n : can be a scalar or a list of modes to be excluded
"""
if type(modes) is list:
self.ex_mode.extend(modes)
self.flag=True
elif type(modes) is int:
self.ex_mode.append(modes)
self.flag=True
else:
print("** Warning ** exclude.add(): invalid input type")
return
def restore(self):
"""
Restores all the excluded modes
"""
if self.flag:
self.ex_mode_keep=self.ex_mode
self.ex_mode=[]
self.flag=False
def on(self):
self.ex_mode=self.ex_mode_keep
self.flag=True
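# Usage sketch (comments only, nothing is executed at import time): assuming the
# input data have been loaded with read_file() or quick_start(), the module-level
# 'exclude' instance can also be driven interactively; the mode numbers below are
# purely illustrative.
# >>> exclude.add([5, 6])     # exclude zone-center modes 5 and 6
# >>> exclude.restore()       # restore all the excluded modes
# >>> exclude.on()            # re-apply the previously stored exclusion list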
class fix_flag:
def __init__(self,value=0.):
self.value=value
self.flag=False
def on(self,value=4):
self.value=value
self.flag=True
def off(self):
self.value=0.
self.flag=False
class fit_flag:
def __init__(self):
pass
def on(self):
self.flag=True
def off(self):
self.flag=False
class spline_flag(fit_flag):
"""
Sets up the spline fit of the frequencies as functions of
the volume of the unit cell.
Several variables are defined:
1. flag: (boolean); if True, frequencies are fitted with splines
2. degree: degree of the spline
3. smooth: *smoothness* of the spline
4. flag_stack: (boolean) signals the presence of the spline stack
5. pol_stack: it is the stack containing parameters for the spline fit
Note:
The spline stack can be set up and initialized by using the keyword\
SPLINE under the keyword FITVOL in the *input.txt* file
Methods:
"""
def __init__(self,flag=False,degree=3,smooth=0):
super().__init__()
self.flag=False
self.flag_stack=False
self.degree=degree
self.smooth=smooth
self.pol_stack=np.array([])
def on(self):
super().on()
def off(self):
super().off()
def set_degree(self,degree):
self.degree=int(degree)
def set_smooth(self,smooth):
self.smooth=smooth
def stack(self):
self.pol_stack=freq_stack_spline()
self.flag_stack=True
def vol_range(self,v_ini, v_fin, npoint):
self.fit_vol=np.linspace(v_ini, v_fin, npoint)
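# Usage sketch (comments only): the module-level 'flag_spline' instance is normally
# configured through the FITVOL/SPLINE keywords in input.txt, or with the set_spline()
# helper defined later in this module; a manual setup could look like the following
# (the volume range below is illustrative, and stack() requires frequency data
# already loaded).
# >>> flag_spline.set_degree(3)
# >>> flag_spline.set_smooth(5)
# >>> flag_spline.vol_range(60., 70., 16)
# >>> flag_spline.stack()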
class poly_flag(fit_flag):
def __init__(self,flag=False,degree=2):
super().__init__()
self.flag=flag
self.flag_stack=False
self.degree=degree
self.pol_stack=np.array([])
def on(self):
super().on()
def off(self):
super().off()
def set_degree(self,degree):
self.degree=int(degree)
def stack(self):
self.pol_stack=freq_stack_fit()
self.flag_stack=True
def vol_range(self,v_ini, v_fin, npoint):
self.fit_vol=np.linspace(v_ini, v_fin, npoint)
class kieffer_class():
def __str__(self):
return "Application of the Kieffer model for acoustic phonons"
def __init__(self,flag=False):
self.flag=False
self.stack_flag=False
self.kief_freq=None
self.kief_freq_inp=None
self.t_range=None
self.f_list=None
self.input=False
def stack(self, t_range, f_list):
self.t_range=t_range
self.f_list=f_list
def get_value(self,temperature):
free=scipy.interpolate.interp1d(self.t_range, self.f_list, kind='quadratic')
return free(temperature)*zu
def on(self):
self.flag=True
print("Kieffer correction on")
if disp.flag:
disp.flag=False
print("Phonon dispersion is deactivated")
if not self.stack_flag:
free_stack_t(pr.kt_init,pr.kt_fin,pr.kt_points)
def off(self):
self.flag=False
print("Kieffer correction off")
def freq(self,f1,f2,f3):
self.kief_freq_inp=np.array([f1, f2, f3])
self.kief_freq=self.kief_freq_inp*csl*h/kb
free_stack_t(pr.kt_init,pr.kt_fin,pr.kt_points)
def plot(self):
plt.figure()
plt.plot(self.t_range, self.f_list, "k-")
plt.xlabel("Temperature (K)")
plt.ylabel("F free energy (J/mol apfu)")
plt.title("Free energy from acustic modes (Kieffer model)")
plt.show()
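# Usage sketch (comments only): the 'kieffer' instance is normally set up through the
# KIEFFER keyword in input.txt; the three acoustic frequencies below (in cm^-1) are
# purely illustrative.
# >>> kieffer.freq(100., 150., 200.)   # set the acoustic frequencies and build the stack
# >>> kieffer.on()                     # activate the correction
# >>> kieffer.plot()                   # F(T) contribution from the acoustic branches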
class bm4_class():
"""
    Setup and information for a 4^th order Birch-Murnaghan EoS (BM4)
It provides:
1. energy: function; Volume integrated BM4 (V-BM4)
2. pressure: function; BM4
3. bm4_static_eos: BM4 parameters for the static energy
calculation as a function of V
4. en_ini: initial values for the BM4 fit
5. bm4_store: BM4 parameters from a fitting at a given
temperature
methods:
"""
def __init__(self):
self.flag=False
self.start=True
self.energy=None
self.pressure=None
self.en_ini=None
self.bm4_static_eos=None
self.bm4_store=None
def __str__(self):
return "BM4 setting: " + str(self.flag)
def on(self):
"""
Switches on the BM4 calculation
"""
self.flag=True
if self.start:
self.energy, self.pressure=bm4_def()
self.start=False
def estimates(self,v4,e4):
"""
Estimates initial values of BM4 parameters for the fit
"""
ini=init_bm4(v4,e4,4.0)
new_ini,dum=curve_fit(v_bm3, v4, e4, \
p0=ini,ftol=1e-15,xtol=1e-15)
kpp=(-1/new_ini[1])*((3.-new_ini[2])*\
(4.-new_ini[2])+35./9.)*1e-21/conv
self.en_ini=[new_ini[0], new_ini[1],\
new_ini[2], kpp, new_ini[3]]
k0_ini=new_ini[1]*conv/1e-21
print("\nBM4-EoS initial estimate:")
print("\nV0: %6.4f" % self.en_ini[0])
print("K0: %6.2f" % k0_ini)
print("Kp: %6.2f" % self.en_ini[2])
print("Kpp: %6.2f" % self.en_ini[3])
print("E0: %8.6e" % self.en_ini[4])
def store(self,bm4st):
"""
        Stores BM4 parameters from a fit at a given temperature
"""
self.bm4_store=bm4st
def upload(self,bm4_eos):
"""
Loads the parameters from the static calculation
(that are then stored in bm4_static_eos)
"""
self.bm4_static_eos=bm4_eos
def upgrade(self):
"""
Uses the stored values of parameters [from the application of
store()] to upgrade the initial estimation done with estimates()
"""
self.en_ini=self.bm4_store
def off(self):
"""
Switches off the BM4 calculation
"""
self.flag=False
def status(self):
"""
Informs on the status of BM4 (on, or off)
"""
print("\nBM4 setting: %s " % self.flag)
class gamma_class():
"""
Store coefficients of a gamma(T) fit
"""
def __init__(self):
self.flag=False
self.degree=1
self.pol=np.array([])
def upload(self,deg,pcoef):
self.flag=True
self.degree=deg
self.pol=pcoef
class super_class():
"""
Store supercell data: number of cells on which the frequencies
computation was done. To be used in connection with CRYSTAL
calculations performed with SCELPHONO and QHA keywords.
Default value: 1
"""
def __init__(self):
self.number=1
self.flag=False
def set(self,snum):
self.flag=True
self.number=snum
print("\n*** Supercell *** Number of cells: %3i" % snum)
def reset(self):
self.flag=False
self.number=1
print("\n*** Supercell deactivated *** Number of cells set to 1")
class lo_class():
"""
LO/TO splitting correction.
The class stores a copy of the original TO frequencies, the modes
affected by LO/TO splitting and the splitting values.
Modes are identified by their progressive number (starting from 0) stored
in the *mode* attribute.
When the correction is activated, new values of frequencies (*f_eff*)
are computed for the relevant modes, according to the formula:
f_eff = 2/3 f_TO + 1/3 f_LO
where f_LO = f_TO + split.
Correction is activated by the keyword LO in the input.txt file,
followed by the name of the file containing the splitting data (two
columns: mode number and the corresponding split in cm^-1).
Internally, the methods *on* and *off* switch respectively on and off
the correction. The method *apply* does the computation of the frequencies
*f_eff*.
"""
def __init__(self):
self.flag=False
self.mode=np.array([])
self.split=np.array([])
self.data_freq_orig=np.array([])
self.data_freq=np.array([])
def on(self):
self.apply()
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
self.flag=True
print("Frequencies corrected for LO-TO splitting")
def off(self):
self.flag=False
self.data_freq=np.copy(self.data_freq_orig)
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
print("LO-TO splitting not taken into account")
def apply(self):
for ifr in np.arange(lo.mode.size):
im=lo.mode[ifr]
for iv in int_set:
freq_lo=self.data_freq_orig[im,iv+1]+self.split[ifr]
self.data_freq[im,iv+1]=(2./3.)*self.data_freq_orig[im,iv+1]\
+(1./3.)*freq_lo
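# Usage sketch (comments only): the 'lo' instance is filled from the file given after
# the LO keyword in input.txt; once loaded, the correction can be toggled.
# >>> lo.off()    # back to the uncorrected TO frequencies
# >>> lo.on()     # re-apply the LO-TO corrected effective frequencies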
class anh_class():
def __init__(self):
self.flag=False
self.disp_off=0
def off(self):
self.flag=False
exclude.restore()
if disp.input_flag:
disp.free_exclude_restore()
print("Anharmonic correction is turned off")
print("Warning: all the excluded modes are restored")
def on(self):
self.flag=True
self.flag_brill=False
for im, ib in zip(anharm.mode, anharm.brill):
if ib == 0:
exclude.add([im])
elif disp.input_flag:
disp.free_exclude([im])
self.flag_brill=True
if self.flag_brill:
disp.free_fit_vt()
print("Anharmonic correction is turned on")
class static_class():
"""
Defines the volume range for the fit of the static EoS
If not specified (default) such range is defined from the
volumes found in the static energies file.
"""
def __init__(self):
self.flag=False
def set(self, vmin, vmax):
"""
Sets the minimum and maximum volumes for the V-range
Args:
vmin: minimum volume
vmax: maximum volume
"""
self.vmin=vmin
self.vmax=vmax
def off(self):
"""
Restores the original V-range (actually, it switches off the volume
selection for the fit of the static EoS)
"""
self.flag=False
def on(self):
"""
It switches on the volume selection for the fit of the static EoS
Note:
The minimum and maximum V-values are set by the 'set' method
of the class
"""
self.flag=True
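# Usage sketch (comments only): 'static_range' restricts the volume range used for the
# static EoS fit; the volumes below are illustrative.
# >>> static_range.set(60., 70.)
# >>> static_range.on()      # then recompute the static EoS with static()
# >>> static_range.off()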
class p_static_class():
def __init__(self):
self.flag=False
self.vmin=None
self.vmax=None
self.pmin=None
self.pmax=None
self.npoints=None
self.k0=None
self.kp=None
self.v0=None
self.e0=None
class volume_control_class():
"""
Defines suitable parameters for the volume_dir function
"""
def __init__(self):
self.degree=2
self.delta=2.
self.t_max=500.
self.shift=0.
self.t_dump=0.
self.dump=1.
self.quad_shrink=4
self.kp_fix=False
self.debug=False
self.upgrade_shift=False
self.skew=1.
self.t_last=0.
self.t_last_flag=False
self.v_last=None
def set_degree(self, degree):
"""
Sets the degree of polynomial used to fit the (P(V)-P0)^2 data.
The fitted curve is the minimized to get the equilibrium volume
at each T and P.
        For each of the parameters relevant in this class, there exists
        a specific method to set its value. The method set_all can be used to
        set several of them at the same time, by using the appropriate
        keywords as arguments. The arguments to set_all are:
Args:
degree: degree of the fitting polynomial (default=2)
delta: volume range where the minimum of the fitting function
is to be searched (default=2.)
skew: the Volume range is centered around the equilibrium
volume approximated by the EoS-based new_volume function
The symmetry around such point can be controlled by
the skew parameter (default=1.: symmetric interval)
shift: Systematic shift from the new_volume estimation (default=0.)
t_max: In the initial estimation of the volume at P/T with the EoS-based
new_volume function, the Kp is refined if T < t_max.
If T > t_max and kp_fix=True, Kp is fixed at the value
refined at t_max (default=500K)
kp_fix: See t_max (default=True)
quad_shrink: if degree=2, it restricts the volume range around the
approximated volume found. The new range is
delta/quad_shrink (default=4)
upgrade_shift: at the end of the computation, the difference between
the volume found and the initial one (from the EoS-
based new_volume function) is calculated. The shift
attribute is then upgraded if upgrade_shift is True
(default=False)
debug: if True, the (P(V)-P0)**2 function is plotted as a function
of V (default=False)
t_dump: temperature over which a dumping on the shift parameter is
applied (default=0.)
dump: dumping on the shift parameter (shift=shift/dump; default=1.)
t_last: if t_last > 10., the last volume computed is used as the
initial guess value (vini) for the next computation at a
new temperature.
"""
self.degree=degree
def set_delta(self, delta):
self.delta=delta
def set_tmax(self,tmax):
self.t_max=tmax
def set_skew(self, skew):
self.left=skew+1
self.right=(skew+1)/skew
def kp_on(self):
self.kp_fix=True
def kp_off(self):
self.kp_fix=False
def debug_on(self):
self.debug=True
def debug_off(self):
self.debug=False
def set_shift(self, shift):
self.shift=shift
def upgrade_shift_on(self):
self.upgrade_shift=True
def upgrade_shift_off(self):
        self.upgrade_shift=False
def set_shrink(self, shrink):
self.quad_shrink=shrink
def shift_reset(self):
self.shift=0.
def set_t_dump(self,t_dump=0., dump=1.0):
self.t_dump=t_dump
self.dump=dump
def set_t_last(self, t_last):
self.t_last=t_last
def set_all(self,degree=2, delta=2., skew=1., shift=0., t_max=500.,\
quad_shrink=4, kp_fix=True, upgrade_shift=False, debug=False,\
t_dump=0., dump=1., t_last=0.):
self.degree=degree
self.delta=delta
self.t_max=t_max
self.kp_fix=kp_fix
self.debug=debug
self.left=skew+1
self.right=(skew+1)/skew
self.shift=shift
self.quad_shrink=quad_shrink
self.upgrade_shift=upgrade_shift
self.skew=skew
self.t_last=t_last
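# Usage sketch (comments only): typical tuning of the 'volume_ctrl' driver before
# calling the volume_dir-based functions; the values below are illustrative.
# >>> volume_ctrl.set_all(degree=2, delta=2., skew=1., t_max=500., kp_fix=True)
# >>> volume_ctrl.upgrade_shift_on()
# >>> volume_ctrl.debug_on()    # plot the (P(V)-P0)^2 function being minimized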
class volume_F_control_class():
"""
Class controlling some parameters relevant for the computation of
volume and thermal expansion by using the volume_from_F function.
Precisely, the initial volume (around which the refined volume vref
is to be searched) is set to vini+shift, where vini is the
output from the volume_dir, whereas shift is from this class.
Shift is computed as the difference vref-vini; it can be upgraded
provided the flag upgrade_shift is set to True.
"""
def __init__(self):
self.shift=0.
self.upgrade_shift=False
self.flag=False
def on(self):
self.flag=True
def off(self):
self.flag=False
def set_shift(self, sh):
self.shift=sh
def upgrade_on(self):
self.upgrade_shift=True
def upgrade_off(self):
self.upgrade_shift=False
def get_shift(self):
return self.shift
def get_upgrade_status(self):
return self.upgrade_shift
def get_flag(self):
return self.flag
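# Usage sketch (comments only): tuning of the 'volume_F_ctrl' driver used by the
# volume_from_F function; the shift value below is illustrative.
# >>> volume_F_ctrl.on()
# >>> volume_F_ctrl.set_shift(0.05)
# >>> volume_F_ctrl.upgrade_on()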
class delta_class():
"""
Control parameters for the numerical evaluation of the first and second
derivatives of the Helmholtz free energy as a function of T. They are
relevant for the entropy_v function that computes both the entropy and
specific heat at a fixed volume, as well as the computation of thermal
expansion.
Initial values of delta, degree and number of points are read
    from the parameters file 'parame.py'.
    New values can be set by the methods set_delta, set_degree and set_nump
    of the class. Values can be retrieved by the corresponding 'get' methods.
    The reset method sets the default values.
An adaptive scheme is activated by the method adaptive_on (adaptive_off
deactivates the scheme). In this case the delta value is computed as a function
of temperature (T). Precisely:
delta=delta_min+(T-t_min)*(delta_max-delta_min)/(t_max-t_min)
delta=delta_min if T < t_min
delta=delta_max if T > t_max
    The parameters t_min, t_max, delta_min and delta_max can be set by the
adaptive_set method (default values 50, 1000, 10, 50, respectively)
"""
def __init__(self):
self.delta=pr.delta
self.nump=pr.nump
self.degree=pr.degree
self.adaptive=False
self.tmin=50.
self.tmax=1000.
self.dmin=10.
self.dmax=50.
def adaptive_on(self):
self.adaptive=True
def adaptive_off(self):
self.adaptive=False
def adaptive_set(self, tmin=50., tmax=1000., dmin=10., dmax=50.):
self.tmin=tmin
self.tmax=tmax
self.dmin=dmin
self.dmax=dmax
def set_delta(self,delta):
self.delta=delta
print("Delta T value, for the computation of entropy, Cv and thermal expansion set to %4.1f" \
% self.delta)
def set_degree(self,degree):
self.degree=degree
print("Degree for the computation of entropy, Cv and thermal expansion set to %3i" \
% self.degree)
def set_nump(self,nump):
self.nump=nump
print("N. points for the computation of entropy, Cv and thermal expansion set to %3i" \
% self.nump)
def get_delta(self, tt=300):
if not self.adaptive:
return self.delta
else:
if tt < self.tmin:
return self.dmin
elif tt > self.tmax:
return self.dmax
else:
return self.dmin+((tt-self.tmin)/(self.tmax-self.tmin))*(self.dmax-self.dmin)
def get_degree(self):
return self.degree
def get_nump(self):
return self.nump
def reset(self):
self.delta=pr.delta
self.degree=pr.degree
self.nump=pr.nump
print("\nDefault parameters for the computation of entropy, Cv and thermal expansion:")
print("Delta: %3.1f" % self.delta)
print("Degree: %3i" % self.degree)
print("Num. points: %3i" % self.nump)
class disp_class():
"""
Sets up the computation for the inclusion of phonon dispersion effects
    in the EoS computation or for the calculation of all the thermodynamic
properties.
The class is relevant and activated if the DISP keyword is contained
in the input.txt input file.
Dispersion effects can be switched on or off by using the on() and off()
methods.
Note:
        To apply the phonon dispersion correction to the computation of an equation
of state, the method eos_on() must be invoked [the method eos_off() switches
it off]. In this case, more than one volume must be present in the input
file for dispersion.
Note:
If phonon frequencies are computed for several values of the unit cell volume,
in order to apply a VT-phonon dispersion correction to thermodynamic properties,
the method thermo_vt_on() must be invoked [the method thermo_vt_off() switches it off].
On the contrary, a T-phonon dispersion correction is applied (it is assumed that
phonon frequencies do not change with volume).
Note:
The method free_fit_vt() must be used to get the F(V,T) function for
off-center phonon modes.
"""
def __init__(self):
self.input_flag=False
self.flag=False
self.eos_flag=False
self.thermo_vt_flag=False
self.freq=None
self.deg=None
self.fit_type=None
self.input=False
self.fit_vt_flag=False
self.fit_vt=None
self.temp=None
self.error_flag=False
self.ex_flag=False
self.free_min_t=10.
self.fit_vt_deg_t=4
self.fit_vt_deg_v=4
self.fit_t_deg=6
self.free_nt=24
self.free_disp=True
def on(self):
self.flag=True
if anharm.disp_off > 0:
anharm.mode=np.copy(anharm.mode_orig)
anharm.brill=np.copy(anharm.brill_orig)
anharm.nmode=anharm.nmode_orig
print("Dispersion correction activated")
if kieffer.flag:
kieffer.flag=False
print("Kieffer correction is deactivated")
def off(self):
self.flag=False
print("Dispersion correction off")
if anharm.flag:
mode_a=np.array([])
mode_b=np.array([])
for ia, ib in zip(anharm.mode, anharm.brill):
if ib == 1:
print("\nWarning: the anharmonic mode n. %2i has Brillouin flag" % ia)
print("equal to 1; it should not be considered if the dispersion")
print("correction is deactivated.\n")
anharm.disp_off=anharm.disp_off+1
else:
mode_a=np.append(mode_a, ia)
mode_b=np.append(mode_b, ib)
if anharm.disp_off == 1:
anharm.nmode_orig=anharm.nmode
anharm.mode_orig=np.copy(anharm.mode)
anharm.brill_orig=np.copy(anharm.brill)
anharm.nmode=mode_a.size
anharm.mode=np.copy(mode_a)
anharm.brill=np.copy(mode_b)
print("List of anharmonic modes considered: %s" % anharm.mode)
def eos_on(self):
if self.flag :
if not self.error_flag:
self.eos_flag=True
print("\nPhonon dispersion correction for bulk_dir or bulk_modulus_p computations")
else:
print("Only 1 volume found in the 'disp' files; NO disp_eos possible")
else:
if self.input_flag:
print("Phonon dispersion is not on; use disp.on() to activate")
else:
print("No input of dispersion data; eos_on ignored")
def eos_off(self):
self.eos_flag=False
print("No phonon dispersion correction for bulk_dir computation")
def thermo_vt_on(self):
if self.nset > 1:
self.thermo_vt_flag=True
print("VT-dispersion correction of thermodynamic properties\n")
if not self.fit_vt_flag:
self.free_fit_vt()
else:
print("One volume only found in the DISP file")
def thermo_vt_off(self):
self.thermo_vt_flag=False
print("T-dispersion correction of thermodynamic properties")
print("No volume dependence considered")
def freq_spline_fit(self):
"""
        Requests and performs spline fits of the frequencies of the
        off-center modes as functions of volume.
Relevant parameters for the fit (degree and smooth parameters) are
specified in the appropriate input file.
"""
self.spline=np.array([])
ord_vol=list(np.argsort(self.vol))
vol = [self.vol[iv] for iv in ord_vol]
for ifr in np.arange(self.f_size):
freq=self.freq[:,ifr]
freq=[freq[iv] for iv in ord_vol]
ifit=UnivariateSpline(vol, freq, k=self.fit_degree, s=self.fit_type)
self.spline=np.append(self.spline, ifit)
def freq_fit(self):
"""
It requests and makes polynomial fits of the frequencies of the off
center modes as function of volumes.
The relevant parameter for the fit (degree) is specified in the
appropriate input file.
"""
self.poly=np.array([])
for ifr in np.arange(self.f_size):
if self.nset > 1:
freq=self.freq[:,ifr]
ifit=np.polyfit(self.vol, freq, self.fit_degree)
self.poly=np.append(self.poly,ifit)
else:
self.poly=np.append(self.poly, (0, self.freq[:,ifr][0]))
if self.nset == 1:
self.poly=self.poly.reshape(self.f_size,2)
else:
self.poly=self.poly.reshape(self.f_size,self.fit_degree+1)
def freq_func(self,ifr,vv):
fit=self.poly[ifr]
return np.polyval(fit,vv)
def freq_spline_func(self,ifr,vv):
fit=self.spline[ifr](vv)
return fit.item(0)
def check(self,ifr):
"""
Check of the frequencies fit quality for a specified mode
Args:
ifr: sequence number of the mode to be checked
"""
v_list=np.linspace(np.min(disp.vol), np.max(disp.vol),40)
if self.fit_type == 0:
f_list=[self.freq_func(ifr,iv) for iv in v_list]
else:
f_list=[self.freq_spline_func(ifr,iv) for iv in v_list]
tlt="Check fit for mode N. "+ str(ifr)
plt.figure()
plt.plot(v_list,f_list, "k-")
plt.plot(disp.vol, disp.freq[:,ifr],"b*")
plt.xlabel("Volume (A^3)")
plt.ylabel("Frequency (cm^-1)")
plt.title(tlt)
plt.show()
def check_multi(self, fr_l):
"""
Check of the frequencies fit quality for a list of modes
Args:
fr_l: list of sequence numbers of the various modes to be checked
Example:
>>> disp.check_multi([0, 1, 2, 3])
>>> disp.check_multi(np.arange(10))
"""
for ifr in fr_l:
self.check(ifr)
def free_exclude(self,ex_list):
"""
Excludes the indicated off-center modes from the computation of the
free energy
Args:
ex_list: list of modes to be excluded
Note:
Even a single excluded mode must be specified as a list; for instance
disp.free_exclude([0])
Note:
after the exclusion of some modes, the F(V,T) function has
to be recomputed by the free_fit_vt method
"""
if not self.input_flag:
print("no input of dispersion data")
return
self.ex_flag=True
self.excluded_list=ex_list
print("Off center modes excluded: ", self.excluded_list)
print("Compute a new disp.free_fit_vt surface")
def free_exclude_restore(self):
"""
The excluded modes are restored
"""
self.ex_flag=False
print("All off centered mode restored")
print("Compute a new disp.free_fit_vt surface")
def free(self,temp,vv):
nf_list=np.arange(self.f_size)
if self.fit_type == 0:
freq=(self.freq_func(ifr,vv) for ifr in nf_list)
else:
freq=(self.freq_spline_func(ifr,vv) for ifr in nf_list)
d_deg=self.deg
wgh=self.w_list
enz=0.
fth=0.
idx=0
nfreq=0
for ifr in freq:
if not self.ex_flag:
nfreq=nfreq+1
fth=fth+d_deg[idx]*np.log(1-np.e**(ifr*e_fact/temp))*wgh[idx]
enz=enz+d_deg[idx]*ifr*ez_fact*wgh[idx]
else:
if not (idx in self.excluded_list):
nfreq=nfreq+1
fth=fth+d_deg[idx]*np.log(1-np.e**(ifr*e_fact/temp))*wgh[idx]
enz=enz+d_deg[idx]*ifr*ez_fact*wgh[idx]
idx=idx+1
return enz+fth*kb*temp/conv
def free_fit(self,mxt,vv,disp=True):
fit_deg=self.fit_t_deg
nt=24
nt_plot=50
tl=np.linspace(10,mxt,nt)
free=np.array([])
for it in tl:
ifree=self.free(it,vv)
free=np.append(free,ifree)
fit=np.polyfit(tl,free,fit_deg)
self.fit=fit
if disp:
tl_plot=np.linspace(10,mxt,nt_plot)
free_plot=self.free_func(tl_plot)
print("Phonon dispersion correction activated")
print("the contribution to the entropy and to the")
print("specific heat is taken into account.\n")
if verbose.flag:
plt.figure()
plt.plot(tl,free,"b*",label="Actual values")
plt.plot(tl_plot, free_plot,"k-",label="Fitted curve")
plt.legend(frameon=False)
plt.xlabel("T (K)")
plt.ylabel("F (a.u.)")
plt.title("Helmholtz free energy from off-centered modes")
plt.show()
def free_fit_ctrl(self, min_t=10., t_only_deg=4, degree_v=4, degree_t=4, nt=24, disp=True):
"""
Free fit driver: sets the relevant parameters for the fit computation
of the F(V,T) function, on the values of F calculated on a grid
of V and T points.
Args:
min_t: minimum temperature for the construction of the
VT grid (default=10.)
degree_v: maximum degree of V terms of the surface (default=4)
            degree_t: maximum degree of T terms of the surface (default=4)
            t_only_deg: degree of the T polynomial for a single volume
                   phonon dispersion (default=4)
            nt: number of points along the T axis for the definition of the
                grid (default=24)
            disp: if True, a plot of the surface is shown (default=True)
Note:
The method does not execute the fit, but it defines the most
important parameters. The fit is done by the free_fit_vt() method.
Note:
the volumes used for the construction of the VT grid are those
provided in the appropriate input file. They are available
in the disp.vol variable.
"""
self.free_min_t=min_t
self.fit_t_deg=t_only_deg
self.fit_vt_deg_t=degree_t
self.fit_vt_deg_v=degree_v
self.free_nt=nt
self.free_disp=disp
if self.input_flag:
self.free_fit_vt()
self.free_fit(self.temp,self.vol[0])
def set_tmin(self,tmin):
self.min_t=tmin
def set_nt(self,nt):
self.nt=nt
def free_fit_vt(self):
self.fit_vt_flag=True
min_t=self.free_min_t
nt=self.free_nt
disp=self.free_disp
deg_t=self.fit_vt_deg_t
deg_v=self.fit_vt_deg_v
max_t=self.temp
pvv=np.arange(deg_v+1)
ptt=np.arange(deg_t+1)
p_list=np.array([],dtype=int)
maxvt=np.max([deg_v, deg_t])
for ip1 in np.arange(maxvt+1):
for ip2 in np.arange(maxvt+1):
i1=ip2
i2=ip1-ip2
if i2 < 0:
break
ic=(i1, i2)
if (i1 <= deg_v) and (i2 <= deg_t):
p_list=np.append(p_list,ic)
psize=p_list.size
pterm=int(psize/2)
self.p_list=p_list.reshape(pterm,2)
x0=np.ones(pterm)
t_list=np.linspace(min_t,max_t,nt)
v_list=self.vol
nv=len(v_list)
if nv == 1:
print("\n**** WARNING ****\nOnly one volume found in the 'disp' data files;")
print("NO V,T-fit of F is possible")
self.eos_off()
self.error_flag=True
return
free_val=np.array([])
for it in t_list:
for iv in v_list:
ifree=self.free(it,iv)
free_val=np.append(free_val,ifree)
free_val=free_val.reshape(nt,nv)
vl,tl=np.meshgrid(v_list,t_list)
vl=vl.flatten()
tl=tl.flatten()
free_val=free_val.flatten()
fit, pcov = curve_fit(self.free_vt_func, [vl, tl], free_val, p0 = x0)
self.fit_vt=fit
error=np.array([])
for it in t_list:
for iv in v_list:
f_calc=self.free_vt(it,iv)
f_obs=self.free(it,iv)
ierr=(f_calc-f_obs)**2
error=np.append(error,ierr)
mean_error=np.sqrt(np.mean(error))
max_error=np.sqrt(np.max(error))
print("V,T-fit of the Helmholtz free energy contribution from the off-centered modes")
print("V, T powers of the fit: %3i %3i" % (self.fit_vt_deg_v, self.fit_vt_deg_t))
print("Mean error: %5.2e" % mean_error)
print("Maximum error: %5.2e" % max_error)
if self.ex_flag:
print("Excluded modes: ", self.excluded_list)
if disp:
t_plot=np.linspace(min_t,max_t,40)
v_plot=np.linspace(np.min(vl),np.max(vl),40)
v_plot,t_plot=np.meshgrid(v_plot,t_plot)
v_plot=v_plot.flatten()
t_plot=t_plot.flatten()
h_plot=self.free_vt_func([v_plot, t_plot], *fit)
h_plot=h_plot.reshape(40,40)
v_plot=v_plot.reshape(40,40)
t_plot=t_plot.reshape(40,40)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111,projection='3d', )
ax.scatter(tl,vl,free_val,c='r')
ax.plot_surface(t_plot, v_plot, h_plot)
ax.set_xlabel("Temperature", labelpad=7)
ax.set_ylabel("Volume", labelpad=7)
ax.set_zlabel('F(T,V)', labelpad=8)
plt.show()
def free_vt_func(self,data,*par):
vv=data[0]
tt=data[1]
nterm=self.p_list.shape[0]
func=0.
for it in np.arange(nterm):
pv=self.p_list[it][0]
pt=self.p_list[it][1]
func=func+par[it]*(vv**pv)*(tt**pt)
return func
def free_vt(self,temp,volume):
return self.free_vt_func([volume,temp],*self.fit_vt)
def free_func(self,temp):
free_disp=np.polyval(self.fit,temp)
return free_disp
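# Usage sketch (comments only): once dispersion data are loaded (DISP keyword or the
# load_disp function), the typical workflow is to fit F(V,T) for the off-centered
# modes and to switch on the VT-correction of the thermodynamic properties; the mode
# numbers passed to check_multi are illustrative.
# >>> disp.free_fit_ctrl(degree_v=4, degree_t=4, nt=24)
# >>> disp.thermo_vt_on()
# >>> disp.check_multi([0, 1, 2])    # inspect the quality of the frequency fits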
class volume_delta_class():
"""
Defines a suitable V range for the numerical evaluation of the
derivatives of any quantity with respect to V.
The V-range (delta) is obtained by multiplying the static equilibrium
volume (V0; which is computed by the static function) with a factor read
from the parame.py parameters' file; such parameter (frac) is stored
in the vd.frac variable and can also be set by the set_frac method.
The method set_delta computes delta, provided a volume is input.
When delta is computed, the vd.flag is set to True and its values
is used in several functions computing derivatives. On the contrary,
if vd.flag is set to False (use the method off), the delta
value is read from the parameters' file (pr.delta_v).
"""
def __init__(self):
self.v0=None
self.flag=False
self.delta=None
self.frac=pr.v_frac
def set_delta(self,vol=0.):
"""
Sets the V-delta value for the calculation of derivatives with
respect to V.
Args:
vol: if vol > 0.1, computes delta for the volume vol;
if vol < 0.1, vol is set to the default value stored
in the v0 variable.
"""
if vol < 0.1:
if self.v0 != None:
self.flag=True
self.delta=self.frac*self.v0
else:
war1="Warning: No volume provided for the set_delta method\n"
war2=" The delta value is read from the parameters file"
war=war1+war2+": %5.4f"
print(war % pr.delta_v)
self.flag=False
else:
self.delta=vol*self.frac
self.flag=True
self.v0=vol
def set_frac(self,frac):
self.frac=frac
def on(self):
self.flag=True
def off(self):
self.flag=False
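# Usage sketch (comments only): 'vd' controls the volume increment used for numerical
# derivatives with respect to V; the volume below is illustrative.
# >>> vd.set_delta(65.)    # delta = frac * 65., and vd.flag is set to True
# >>> vd.off()             # fall back to the fixed pr.delta_v value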
class thermal_expansion_class():
"""
Interface for the computation of thermal expansion by different algorithms.
The method 'compute' performs the calculation by calling different functions
according to the 'method' keyword. Similarly, the method 'compute_serie'
performs the calculation of alpha as a function of temperature.
Several default parameters for the calculation are provided, which can
be set by the method 'set'.
    The algorithms which are currently implemented can be listed by the method
    'info'.
    The 'compute_serie' method performs the calculation of the thermal
expansion in a given T-range and, optionally, performs a power
series fit on the computed values. Data from the fit can optionally be
loaded in the internal database if a phase name is provided.
Note:
For the method 'k_alpha_eos', this class uses a specialized
plotting function from the plot.py module, whose parameters are
controlled by the plot.set_param method.
"""
def __init__(self):
self.method='k_alpha_dir'
self.nt=12
self.fix=0
self.fit=False
self.tex=False
self.save=False
self.phase=''
self.title=True
def set(self, method='k_alpha_dir', nt=12, fit=False, tex=False, save=False,\
phase='', title=True, fix=0.):
self.method=method
self.nt=nt
self.fix=fix
self.fit=fit
self.tex=tex
self.save=save
self.phase=phase
self.title=title
def info(self):
print("\nMethods currently implemented\n")
print("k_alpha_dir: computes alpha from the product K*alpha, through the")
print(" derivative of P with respect to T, at constant V")
print(" At any T and P, K and P are directly computed from")
print(" the Helmholtz free energy function derivatives. No EoS")
print(" is involved at any step;")
print("k_alpha_eos: same as k_alpha_dir, but pressures and bulk moduli")
print(" are computed from an EoS;")
print("alpha_dir: the computation is perfomed through the derivative")
print(" of the unit cell volume with respect to V; volumes are")
print(" calculated without reference to any EoS, by the function")
print(" volume_dir.")
def compute(self, tt, pp, method='default', fix=0, prt=False):
"""
Thermal expansion at a specific temperature and pressure
Args:
tt: temperature (K)
pp: pressure (GPa)
method: 3 methods are currently implemented ('k_alpha_dir',
'k_alpha_eos' and 'alpha_dir'); default 'k_alpha_dir'
fix: relevant for method 'k_alpha_eos' (default 0., Kp not fixed)
prt: relevant for method 'k_alpha_eos'; it controls printout
(default False)
"""
if method=='default':
method=self.method
if fix==0:
fix=self.fix
if method=='k_alpha_dir':
if prt:
alpha_dir_from_dpdt(tt, pp, prt)
else:
alpha,k,vol=alpha_dir_from_dpdt(tt, pp, prt)
return alpha
elif method=='k_alpha_eos':
exit=False
if not prt:
exit=True
alpha=thermal_exp_p(tt, pp, False, exit, fix=fix)
return alpha[0]
else:
thermal_exp_p(tt, pp, plot=False, ex=exit, fix=fix)
elif method=='alpha_dir':
alpha=alpha_dir(tt,pp)
if prt:
print("Thermal expansion: %6.2e K^-1" % alpha)
else:
return alpha
else:
msg="*** Warning: method "+method+" not implemented"
print(msg)
def compute_serie(self, tmin, tmax, pressure=0, nt=0, fit='default', tex='default',\
title='default', save='default', phase='default', method='default',\
prt=True, fix=0):
"""
Thermal expansion in a T-range
Args:
tmin, tmax: minimum and maximum temperature in the range
pressure: pressure (GPa); default 0
nt: number of points in the T-range; if nt=0, the default is chosen (12)
method: one of the three methods currently implemented
fit: if True, a power series fit is performed
phase: if fit is True and a phase name is specified (label), the data
from the power series fit are loaded in the internal database
fix: relevant for the method 'k_alpha_eos'; if fix is not 0.,
Kp is fixed at the specified value
title: if True, a title of the plot is provided
            tex: if tex is True, LaTeX formatting is provided
prt: relevant for the method 'k_alpha_eos'
save: if True, the plot is saved in a file
Note:
if save is True and method is 'k_alpha_eos', the name of the file
where the plot is saved is controlled by the plot.name and plot.ext variables.
The file resolution is controlled by the plot.dpi variable.
The appropriate parameters can be set by the set_param method
of the plot instance of the plot_class class (in the plot.py module)
Example:
>>> plot.set_param(dpi=200, name='alpha_k_eos_serie')
>>> thermal_expansion.compute_serie(100, 500, method='k_alpha_eos', save=True)
"""
if nt==0:
nt=self.nt
if fit=='default':
fit=self.fit
if tex=='default':
tex=self.tex
if title=='default':
title=self.title
if save=='default':
save=self.save
if phase=='default':
phase=self.phase
if method=='default':
method=self.method
t_list=np.linspace(tmin, tmax, nt)
t_plot=np.linspace(tmin, tmax, nt*10)
if method=='k_alpha_dir':
if fit and phase == '':
alpha_fit=alpha_dir_from_dpdt_serie(tmin, tmax, nt, pressure, fit, phase, save,\
title, tex)
return alpha_fit
else:
alpha_dir_from_dpdt_serie(tmin, tmax, nt, pressure, fit, phase, save,\
title, tex)
elif method=='alpha_dir':
if not fit:
alpha_dir_serie(tmin, tmax, nt, pressure, fit, prt=prt)
else:
alpha_fit=alpha_dir_serie(tmin, tmax, nt, pressure, fit, prt=prt)
if phase != '':
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
print("")
else:
return alpha_fit
elif method=='k_alpha_eos':
alpha_list=np.array([])
for it in t_list:
ia=self.compute(it, pressure, method='k_alpha_eos', fix=fix)
alpha_list=np.append(alpha_list, ia)
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
if fit:
alpha_fit_plot=alpha_dir_fun(t_plot,*alpha_fit)
tit=''
if tex and title:
tit=r'Thermal expansion (method k\_alpha\_eos)'
elif title:
tit='Thermal expansion (method k_alpha_eos)'
if fit:
x=[t_list, t_plot]
y=[alpha_list, alpha_fit_plot]
style=['k*', 'k-']
lab=['Actual values', 'Power series fit']
if tex:
plot.multi(x,y,style, lab, xlab='Temperature (K)',\
ylab=r'$\alpha$ (K$^{-1}$)', title=tit, tex=True, save=save)
else:
plot.multi(x,y,style, lab, xlab='Temperature (K)',\
title=tit, ylab='Alpha (K$^{-1}$)', save=save)
else:
if tex:
plot.simple(t_list, alpha_list, xlab='Temperature (K)',\
ylab=r'$\alpha$ (K$^{-1}$)', title=tit, tex=True, save=save)
else:
plot.simple(t_list, alpha_list, xlab='Temperature (K)',\
title=tit, ylab='Alpha (K$^{-1}$)', save=save)
if fit:
if phase != '':
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
print("")
else:
return alpha_fit
else:
msg="*** Warning: method "+method+" not implemented"
print(msg)
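# Usage sketch (comments only): the 'thermal_expansion' interface at work; the T, P
# values and the T-range below are illustrative.
# >>> thermal_expansion.info()
# >>> thermal_expansion.compute(300., 0., method='k_alpha_dir')
# >>> thermal_expansion.compute_serie(100., 800., nt=16, method='k_alpha_dir', fit=True)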
# reads in data file. It requires a pathname to the folder
# containing data
def read_file(data_path):
global volume, energy, deg, data_vol_freq, num_set_freq
global num_mode, ini, int_set, int_mode, data_vol_freq_orig
global temperature_list, pcov, data_freq, path, data_file
global data, zu, apfu, power, lpow, power_a, lpow_a, mass
global flag_eos, flag_cp, flag_alpha, flag_err, flag_exp, flag_mass
global data_cp_exp, data_p_file, static_e0
flag_eos=False
flag_cp=False
flag_alpha=False
flag_err=False
flag_exp=False
flag_fit=False
flag_mass=False
flag_super=False
flag_static, flag_volume, flag_freq, flag_ini, flag_fu, flag_set, flag_p_static\
= False, False, False, False, False, False, False
path=data_path
input_file=data_path+'/'+'input.txt'
line_limit=100
with open(input_file) as fi:
jc=0
l0=['']
while (l0 !='END') and (jc < line_limit):
str=fi.readline()
lstr=str.split()
l0=''
if lstr !=[]:
l0=lstr[0].rstrip()
if l0 !='#':
if l0=='STATIC':
data_file=data_path+'/'+fi.readline()
data_file=data_file.rstrip()
flag_static=os.path.isfile(data_file)
elif l0=='PSTATIC':
data_p_file=data_path+'/'+fi.readline()
data_p_file=data_p_file.rstrip()
static_e0=fi.readline().rstrip()
flag_p_static=os.path.isfile(data_p_file)
print("\n*** INFO *** P/V static data found: use p_static")
print(" function to get a BM3-EoS")
elif l0=='VOLUME':
data_file_vol_freq=data_path+'/'+fi.readline()
data_file_vol_freq=data_file_vol_freq.rstrip()
flag_volume=os.path.isfile(data_file_vol_freq)
elif l0=='FREQ':
data_file_freq=data_path+'/'+fi.readline()
data_file_freq=data_file_freq.rstrip()
flag_freq=os.path.isfile(data_file_freq)
elif l0=='EXP':
data_file_exp=data_path+'/'+fi.readline()
data_file_exp=data_file_exp.rstrip()
flag_exp=os.path.isfile(data_file_exp)
elif l0=='LO':
lo_freq_file=data_path+'/'+fi.readline()
lo_freq_file=lo_freq_file.rstrip()
lo.flag=True
elif l0=='FITVOL':
fit_type=fi.readline()
fit_vol=fi.readline()
flag_fit=True
elif l0=='FU':
zu=fi.readline()
flag_fu=True
elif l0=='MASS':
mass=fi.readline()
flag_mass=True
elif l0=='SET':
istr=fi.readline()
while istr.split()[0] =='#':
istr=fi.readline()
int_set=istr
flag_set=True
elif l0=='TEMP':
temperature_list=fi.readline()
flag_eos=True
elif l0=='TITLE':
title=fi.readline().rstrip()
info.title=title
elif l0=='INI':
ini=fi.readline()
flag_ini=True
elif l0=='CP':
power=fi.readline()
flag_cp=True
elif l0=='ALPHA':
power_a=fi.readline()
flag_alpha=True
elif l0=='EXCLUDE':
exclude.restore()
ex_mode=fi.readline()
ex_mode=list(map(int, ex_mode.split()))
exclude.add(ex_mode)
elif l0=='KIEFFER':
kieffer.input=True
kieffer.flag=True
kief_freq=fi.readline()
kief_freq_inp=list(map(float, kief_freq.split()))
kief_freq=np.array(kief_freq_inp)*csl*h/kb
kieffer.kief_freq=kief_freq
kieffer.kief_freq_inp=kief_freq_inp
elif l0=='ANH':
anharm.nmode=int(fi.readline().rstrip())
anharm.mode=np.array([],dtype=int)
anharm.wgt=np.array([],dtype=int)
anharm.brill=np.array([],dtype=int)
for im in np.arange(anharm.nmode):
line=fi.readline().rstrip()
mw=list(map(int, line.split()))
mode=int(mw[0])
brill=int(mw[1])
wgt=int(mw[2])
anharm.mode=np.append(anharm.mode, mode)
anharm.wgt=np.append(anharm.wgt, wgt)
anharm.brill=np.append(anharm.brill, brill)
anharm.flag=True
elif l0=='SUPER':
line=fi.readline().rstrip()
line_val=list(map(int, line.split()))
snum=line_val[0]
static_vol=line_val[1]
flag_static_vol=False
if static_vol == 0:
flag_static_vol=True
flag_super=True
elif l0=='DISP':
disp.input_flag=True
disp.flag=True
disp.input=True
disp_file=data_path+'/'+fi.readline()
disp_info=data_path+'/'+fi.readline()
disp_file=disp_file.rstrip()
disp_info=disp_info.rstrip()
fd=open(disp_info)
line=fd.readline().rstrip().split()
disp.molt=int(line[0])
disp.fit_degree=int(line[1])
disp.fit_type=float(line[2])
disp.temp=float(line[3])
line=fd.readline().rstrip().split()
disp.numf=list(map(int, line))
line=fd.readline().rstrip().split()
disp.wgh=list(map(int, line))
line=fd.readline().rstrip().split()
disp.vol=list(map(float, line))
fd.close()
w_list=np.array([],dtype=int)
for iw in np.arange(disp.molt):
wl=np.repeat(disp.wgh[iw],disp.numf[iw])
w_list=np.append(w_list,wl)
disp.w_list=w_list
disp.f_size=disp.w_list.size
jc=jc+1
if jc>=line_limit:
print("\nWarning: END keyword not found")
if not flag_volume or not flag_freq or not (flag_static or flag_p_static):
print("\nError: one or more data file not found, or not assigned"
" in input")
flag_err=True
return
if not flag_fu:
print("\nError: mandatory FU keyword not found")
flag_err=True
return
if not flag_set:
print("\nError: mandatory SET keyword not found")
flag_err=True
return
fi.close()
if flag_view_input.value:
view_input(input_file)
print("\n-------- End of input file -------\n")
flag_view_input.off()
int_set=int_set.rstrip()
int_set=list(map(int, int_set.split()))
info.freq_sets=int_set
if flag_eos:
temperature_list=temperature_list.rstrip()
temperature_list=list(map(float,temperature_list.split()))
if flag_ini:
ini=ini.rstrip()
ini=list(map(float, ini.split()))
ini[1]=ini[1]*1e-21/conv
zus=list(map(int,zu.rstrip().split()))
zu=zus[0]
apfu=zus[1]
if flag_fit:
fit_type=fit_type.rstrip()
fit_vol=fit_vol.rstrip()
fit_vol=list(map(float, fit_vol.split()))
v_ini=fit_vol[0]
v_fin=fit_vol[1]
nv=int(fit_vol[2])
if fit_type=='SPLINE':
flag_spline.on()
flag_spline.set_degree(fit_vol[3])
flag_spline.set_smooth(fit_vol[4])
flag_spline.vol_range(v_ini, v_fin, nv)
info.fit_type='spline'
info.fit_degree=flag_spline.degree
info.fit_smooth=flag_spline.smooth
info.min_vol_fit=v_ini
info.max_vol_fit=v_fin
info.fit_points=nv
elif fit_type=='POLY':
flag_poly.on()
flag_poly.set_degree(fit_vol[3])
flag_poly.vol_range(v_ini, v_fin, nv)
info.fit_type='poly'
info.fit_degree=flag_poly.degree
info.min_vol_fit=v_ini
info.max_vol_fit=v_fin
info.fit_points=nv
if flag_super:
supercell.set(snum)
if flag_cp:
power=power.rstrip()
power=list(map(float, power.split()))
lpow=len(power)
test_cp=[ipw in cp_power_list for ipw in power]
if not all(test_cp):
print("WARNING: the power list for the Cp fit is not consistent")
print(" with the Perplex database")
print("Allowed powers:", cp_power_list)
print("Given powers:", power)
print("")
if flag_alpha:
power_a=power_a.rstrip()
power_a=list(map(float, power_a.split()))
lpow_a=len(power_a)
test_al=[ipw in al_power_list for ipw in power_a]
if not all(test_al):
print("WARNING: the power list for the alpha fit is not consistent")
print(" with the Perplex database")
print("Allowed powers:", al_power_list)
print("Given powers:", power_a)
print("")
if flag_mass:
mass=float(mass.rstrip())
b_flag=False
if anharm.flag:
anharm_setup()
for im,ib in zip(anharm.mode, anharm.brill):
if ib == 0:
exclude.add([im])
else:
disp.free_exclude([im])
b_flag=True
if disp.flag:
disp.freq=np.array([])
disp_data=np.loadtxt(disp_file)
disp.deg=disp_data[:,0]
nset=len(disp.vol)
disp.nset=nset
for iv in np.arange(nset):
disp.freq=np.append(disp.freq, disp_data[:,iv+1])
disp.freq=disp.freq.reshape(nset,disp.f_size)
if disp.fit_type == 0:
disp.freq_fit()
else:
disp.freq_spline_fit()
disp.free_fit(disp.temp,disp.vol[0])
data=np.loadtxt(data_file)
if flag_p_static:
static_e0=float(static_e0)
data_vol_freq_orig=np.loadtxt(data_file_vol_freq)
lo.data_freq=np.loadtxt(data_file_freq)
lo.data_freq_orig=np.copy(lo.data_freq)
info.min_freq_vol=min(data_vol_freq_orig)
info.max_freq_vol=max(data_vol_freq_orig)
info.freq_points=len(data_vol_freq_orig)
if flag_exp:
data_cp_exp=np.loadtxt(data_file_exp)
volume=data[:,0]
energy=data[:,1]
if flag_super:
if flag_static_vol:
volume=volume*snum
energy=energy*snum
info.min_static_vol=min(volume)
info.max_static_vol=max(volume)
info.static_points=len(volume)
deg=lo.data_freq[:,0]
num_set_freq=lo.data_freq.shape[1]-1
num_mode=lo.data_freq.shape[0]-1
int_mode=np.arange(num_mode+1)
if flag_super:
deg=deg/supercell.number
if not flag_ini:
ini=init_bm3(volume,energy)
data_vol_freq=[]
for iv in int_set:
data_vol_freq=np.append(data_vol_freq, data_vol_freq_orig[iv])
int_set_new=np.array([],dtype='int32')
ind=data_vol_freq.argsort()
for ind_i in ind:
int_set_new=np.append(int_set_new, int_set[ind_i])
if not np.array_equal(int_set, int_set_new):
print("\nWarning ** Volume and frequencies lists have been sorted")
print(" indexing: ", ind)
print("")
int_set=int_set_new
data_vol_freq.sort()
info.min_select_vol=min(data_vol_freq)
info.max_select_vol=max(data_vol_freq)
info.select_points=len(data_vol_freq)
volume_ctrl.set_all()
if flag_fit:
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
if lo.flag:
lo_data=np.loadtxt(lo_freq_file)
lo.mode=lo_data[:,0].astype(int)
lo.split=lo_data[:,1].astype(float)
lo.on()
if disp.input and kieffer.input:
kieffer.flag=False
print("\nBoth Kieffer and phonon dispersion data were found in the input file")
print("The Kieffer model is therefore deactivated")
if b_flag:
print("")
disp.free_fit_vt()
def view():
"""
View input file (input.txt)
"""
input_file=path+"/input.txt"
view_input(input_file)
def view_input(input_file):
line_limit=1000
print("\nInput file\n")
with open(input_file) as fi:
jc=0
l0=['']
while (l0 !='END') and (jc < line_limit):
str=fi.readline()
lstr=str.split()
if lstr !=[]:
l0=lstr[0].rstrip()
if l0 !='#':
print(str.rstrip())
jc=jc+1
def reload_input(path):
reset_flag()
read_file(path)
static()
def load_disp(disp_info, disp_file):
"""
Load files containing data for the phonon dispersion correction. These
are the same files that could be also specified under the keyword DISP
in the input.txt file.
Args:
disp_info: name of the info file
disp_file: name of the frequencies' file
"""
disp.input_flag=True
disp.flag=True
disp.input=True
disp_file=path_orig+'/'+disp_file
disp_info=path_orig+'/'+disp_info
fd=open(disp_info)
line=fd.readline().rstrip().split()
disp.molt=int(line[0])
disp.fit_degree=int(line[1])
disp.fit_type=float(line[2])
disp.temp=float(line[3])
line=fd.readline().rstrip().split()
disp.numf=list(map(int, line))
line=fd.readline().rstrip().split()
disp.wgh=list(map(int, line))
line=fd.readline().rstrip().split()
disp.vol=list(map(float, line))
fd.close()
disp.error_flag=False
if len(disp.vol) == 1:
disp.error_flag=True
w_list=np.array([],dtype=int)
for iw in np.arange(disp.molt):
wl=np.repeat(disp.wgh[iw],disp.numf[iw])
w_list=np.append(w_list,wl)
disp.w_list=w_list
disp.f_size=disp.w_list.size
disp.freq=np.array([])
disp_data=np.loadtxt(disp_file)
disp.deg=disp_data[:,0]
nset=len(disp.vol)
disp.nset=nset
for iv in np.arange(nset):
disp.freq=np.append(disp.freq, disp_data[:,iv+1])
disp.freq=disp.freq.reshape(nset,disp.f_size)
if disp.fit_type == 0:
disp.freq_fit()
else:
disp.freq_spline_fit()
disp.free_fit(disp.temp,disp.vol[0])
print("Phonon dispersion data loaded from the file %s" % disp_file)
print("Info data from the file %s" % disp_info)
print("Phonon frequencies are computed at the volume(s) ", disp.vol)
print("\nUse disp.free_fit_ctrl to get free energy surfaces F(T) or F(V,T)")
def set_fix(fix=4.):
"""
Sets Kp to a value and keeps it fixed during fitting of EoS
Args:
fix (optional): Kp value. Default 4.
        if fix=0, Kp is fixed to the last computed value stored in info.kp
    The flag f_fix.flag is set to True
"""
if fix == 0:
fix=info.kp
f_fix.on(fix)
def reset_fix():
"""
    Resets the fix Kp option: f_fix.flag=False
"""
f_fix.off()
def fix_status():
"""
Inquires about the setting concerning Kp
"""
print("Fix status: %r" % f_fix.flag)
if f_fix.flag:
print("Kp fixed at %4.2f" % f_fix.value )
def set_spline(degree=3,smooth=5, npoint=16):
"""
Sets spline fits of the frequencies as function of volume
Args:
degree (optional): degree of the spline (default: 3)
smooth (optional): smoothness of the spline (default: 5)
npoint (optional): number of points of the spline function
(default: 16)
"""
dv=0.2
flag_spline.on()
flag_poly.off()
flag_spline.set_degree(degree)
flag_spline.set_smooth(smooth)
fit_vol_exists=True
try:
flag_spline.fit_vol
except AttributeError:
fit_vol_exists=False
if not fit_vol_exists:
set_volume_range(min(data_vol_freq)-dv,max(data_vol_freq)+dv,npoint,\
prt=True)
else:
set_volume_range(min(flag_spline.fit_vol),max(flag_spline.fit_vol),npoint)
flag_spline.stack()
info.fit_type='spline'
info.fit_degree=degree
info.fit_smooth=smooth
info.fit_points=npoint
info.min_vol_fit=min(flag_spline.fit_vol)
info.max_vol_fit=max(flag_spline.fit_vol)
def set_poly(degree=4,npoint=16):
"""
Sets polynomial fits of the frequencies as function of volume
Args:
        degree (optional): degree of the polynomial (default: 4)
npoint (optional): number of points of the polynomial function
(default: 16)
"""
dv=0.2
flag_poly.on()
flag_spline.off()
flag_poly.set_degree(degree)
fit_vol_exists=True
try:
flag_poly.fit_vol
except AttributeError:
fit_vol_exists=False
if not fit_vol_exists:
set_volume_range(min(data_vol_freq)-dv,max(data_vol_freq)+dv,npoint, \
prt=True)
else:
set_volume_range(min(flag_poly.fit_vol),max(flag_poly.fit_vol),npoint)
flag_poly.stack()
info.fit_type='poly'
info.fit_degree=degree
info.fit_points=npoint
info.min_vol_fit=min(flag_poly.fit_vol)
info.max_vol_fit=max(flag_poly.fit_vol)
def set_volume_range(vini,vfin,npoint=16,prt=False):
"""
Defines a volume range for the fitting of frequencies and EoS
in the case that SPLINE or POLY fits have been chosen
Args:
vini: minimum volume
vfin: maximum volume
npoint (optional): number of points in the volume range
"""
if flag_poly.flag:
flag_poly.vol_range(vini,vfin,npoint)
flag_poly.stack()
info.fit_points=npoint
info.min_vol_fit=min(flag_poly.fit_vol)
info.max_vol_fit=max(flag_poly.fit_vol)
if prt:
print("Volume range %8.4f - %8.4f defined for 'POLY' fit" %\
(vini, vfin))
elif flag_spline.flag:
flag_spline.vol_range(vini,vfin,npoint)
flag_spline.stack()
info.fit_points=npoint
info.min_vol_fit=min(flag_spline.fit_vol)
info.max_vol_fit=max(flag_spline.fit_vol)
if prt:
print("Volume range %8.4f - %8.4f defined for 'SPLINE' fit" %\
(vini, vfin))
else:
print("No fit of frequencies active\nUse set_poly or set_spline\n")
def fit_status():
if flag_poly.flag or flag_spline.flag:
print("Fit of frequencies is active")
if flag_spline.flag:
print("Spline fit: degree %2d, smooth: %3.1f" \
% (flag_spline.degree, flag_spline.smooth))
print("Volume range: %5.2f - %5.2f, points=%d" % \
(min(flag_spline.fit_vol), max(flag_spline.fit_vol), \
flag_spline.fit_vol.size))
else:
print("Polynomial fit: degree %2d" % flag_poly.degree)
print("Volume range: %5.2f - %5.2f, points=%d" % \
(min(flag_poly.fit_vol), max(flag_poly.fit_vol), \
flag_poly.fit_vol.size))
else:
print("Fitting is off")
def fit_off():
flag_poly.off()
flag_spline.off()
info.fit_type='No fit'
def quick_start(path):
"""
Quick start of the program.
Reads the input files found under the folder 'path'
whose name is written in the 'quick_start.txt' file
(found in the master folder).
Executes read_file; static (static equation of state)
and stacks data for the application of the Kieffer model,
if required with the optional 'KIEFFER' keyword in input.txt
"""
read_file(path)
static(plot=False)
if kieffer.flag:
free_stack_t(pr.kt_init, pr.kt_fin, pr.kt_points)
if verbose.flag:
print("Results from the Kieffer model for acoustic branches:")
print("plot of the Helmholtz free energy as a function of T.")
print("Temperature limits and number of points defined in parame.py")
kieffer.plot()
else:
print("Kieffer model for the acoustic branches activated")
def v_bm3(vv,v0,k0,kp,c):
"""
Volume integrated Birch-Murnaghan equation (3^rd order)
Args:
vv: volume
v0: volume at the minimum of the energy
k0: bulk modulus
kp: derivative of k0 with respect to P
c: energy at the minimum
Returns:
the energy at the volume vv
"""
v0v=(np.abs(v0/vv))**(2/3)
f1=kp*(np.power((v0v-1.),3))
f2=np.power((v0v-1.),2)
f3=6.-4*v0v
return c+(9.*v0*k0/16.)*(f1+f2*f3)
def bm3(vv,v0,k0,kp):
"""
Birch-Murnaghan equation (3^rd order)
Args:
vv: volume
v0: volume at the minimum of the energy
k0: bulk modulus
kp: derivative of k0 with respect to P
Returns:
the pressure at the volume vv
"""
v0v7=np.abs((v0/vv))**(7/3)
v0v5=np.abs((v0/vv))**(5/3)
v0v2=np.abs((v0/vv))**(2/3)
f1=v0v7-v0v5
f2=(3/4)*(kp-4)*(v0v2-1)
return (3*k0/2)*f1*(1+f2)
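# Illustrative sketch: a minimal, self-contained check of the two BM3 relations
# defined above. It only verifies that bm3() gives zero pressure at V=V0 and that
# v_bm3() takes its minimum value c there; all parameter values are hypothetical
# and do not refer to any real phase.
def _example_bm3_check():
    v0, k0, kp, c = 100.0, 0.005, 4.0, -1000.0   # hypothetical EoS parameters
    p_at_v0 = bm3(v0, v0, k0, kp)                # expected: 0.
    e_min = v_bm3(v0, v0, k0, kp, c)             # expected: c
    e_compressed = v_bm3(0.95*v0, v0, k0, kp, c) # expected: larger than c
    return p_at_v0, e_min, e_compressed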
def bmx_tem(tt,**kwargs):
"""
V-BMx (volume integrated) fit at the selected temperature
Args:
tt: temperature
Keyword Args:
fix: if fix > 0.1, kp is fixed to the value 'fix'
during the optimization of the EoS.
(this is a valid option only for the BM3 fit,
but it is ignored for a BM4 EoS)
Returns:
1. free energy values at the volumes used for the fit
2. optimized v0, k0, kp, (kpp), and c
3. covariance matrix
Note:
bmx_tem optimizes the EoS according to several
possible options specified elsewhere:
1. kp fixed or free
2. frequencies not fitted, or fitted by
polynomials or splines
3. 3^rd or 4^th order BM EoS
Note:
bmx_tem includes energy contributions from static and vibrational
optical modes; acoustic contributions from the modified Kieffer
model are included, provided the KIEFFER keyword is in the input
file; contributions from anharmonic modes are included, provided
the ANH keyword is in the input file. NO dispersion correction
is included (even if the DISP keyword is provided).
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
flag_x=False
volb=data_vol_freq
if flag_poly.flag:
volb=flag_poly.fit_vol
elif flag_spline.flag:
volb=flag_spline.fit_vol
if f_fix.flag:
fix=f_fix.value
flag_x=True
p0_f=[ini[0],ini[1],ini[3]]
if fixpar:
if fix_value < 0.1:
flag_x=False
else:
fix=fix_value
flag_x=True
p0_f=[ini[0],ini[1],ini[3]]
if flag_poly.flag or flag_spline.flag:
free_energy=free_fit(tt)
else:
free_energy=free(tt)
if (flag_x) and (not bm4.flag):
pterm, pcov_term = curve_fit(lambda volb, v0, k0, c: \
v_bm3(volb, v0, k0, fix, c), \
volb, free_energy, p0=p0_f, \
ftol=1e-15, xtol=1e-15)
pterm=np.append(pterm,pterm[2])
pterm[2]=fix
else:
if bm4.flag:
if f_fix.flag:
reset_fix()
fix_status()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pterm, pcov_term= curve_fit(bm4.energy, volb, free_energy,\
method='dogbox',p0=bm4.en_ini, ftol=1e-18, xtol=3.e-16,gtol=1e-18)
bm4.store(pterm)
else:
pterm, pcov_term = curve_fit(v_bm3, volb, free_energy, \
p0=ini, ftol=1e-15, xtol=1e-15)
return [free_energy, pterm, pcov_term]
def bulk_conversion(kk):
"""
Bulk modulus unit conversion (from atomic units to GPa)
"""
kc=kk*conv/1e-21
print("Bulk modulus: %8.4e a.u. = %6.2f GPa" % (kk, kc))
def stop():
"""
used to exit from the program in case of fatal exceptions
"""
while True:
print("Program will be terminated due to errors in processing data")
answ=input('Press enter to quit')
sys.exit(1)
def bm4_def():
V0=sym.Symbol('V0',real=True,positive=True)
V=sym.Symbol('V',real=True,positive=True)
f=sym.Symbol('f',real=True)
kp=sym.Symbol('kp',real=True)
ks=sym.Symbol('ks',real=True)
k0=sym.Symbol('k0',real=True)
P=sym.Symbol('P',real=True,positive=True)
E0=sym.Symbol('E0',real=True)
c=sym.Symbol('c',real=True)
f=((V0/V)**sym.Rational(2,3)-1)/2
P=3*k0*f*((1+2*f)**sym.Rational(5,2))*(1+sym.Rational(3,2)*(kp-4.)*f +\
sym.Rational(3,2)*(k0*ks+(kp-4.)*(kp-3.)+sym.Rational(35,9))*(f**2))
E=sym.integrate(P,V)
E0=E.subs(V,V0)
E=E0-E+c
bm4_energy=sym.lambdify((V,V0,k0,kp,ks,c),E,'numpy')
bm4_pressure=sym.lambdify((V,V0,k0,kp,ks),P,'numpy')
return bm4_energy, bm4_pressure
def init_bm4(vv,en,kp):
"""
Function used to estimate the initial parameters of a V-integrated BM4
EoS. The function is used by the method "estimates" of the bm4 class.
The estimation is done on the basis of a previous BM3 optimization
whose initial parameters are provided by the current function.
Args:
vv (list): volumes
en (list): static energies at the corresponding volumes vv
kp: initial value assigned to kp
Returns:
"ini" list of V-integrated EoS parameters (for a BM3) estimated by a
polynomial fit: v_ini, k0_ini, kp, e0_ini.
Note: such parameters are used as initial guesses for the BM3 optimization
performed by the method "estimates" of the class bm4 that, in turn,
outputs the "ini" list for the BM4 EoS optimization.
"""
pol=np.polyfit(vv,en,4)
pder1=np.polyder(pol,1)
pder2=np.polyder(pol,2)
v_r=np.roots(pder1)
vs=v_r*np.conj(v_r)
min_r=np.argmin(vs)
v_ini=np.real(v_r[min_r])
e0_ini=np.polyval(pol, v_ini)
k0_ini=np.polyval(pder2, v_ini)
k0_ini=k0_ini*v_ini
ini=[v_ini, k0_ini, kp, e0_ini]
return ini
def init_bm3(vv,en):
"""
Estimates initial parameters for the V-integrated BM3 EoS in case
the INI keyword is not present in "input.txt"
Args:
vv (list): volumes
en (list): static energies at the corresponding volumes vv
Returns:
"ini" list of V-integrated EoS parameters estimated by a
polynomial fit: v_ini, k0_ini, kp, e0_ini. kp is set to 4.
Note:
such parameters are used as initial guesses for the bm3 optimization.
"""
kp_ini=4.
pol=np.polyfit(vv,en,3)
pder1=np.polyder(pol,1)
pder2=np.polyder(pol,2)
v_r=np.roots(pder1)
vs=v_r*np.conj(v_r)
min_r=np.argmin(vs)
v_ini=np.real(v_r[min_r])
e0_ini=np.polyval(pol, v_ini)
k0_ini=np.polyval(pder2, v_ini)
k0_ini=k0_ini*v_ini
ini=[v_ini, k0_ini, kp_ini, e0_ini]
return ini
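# Illustrative sketch: the polynomial strategy of init_bm3 applied to synthetic
# E(V) data generated from v_bm3 itself with hypothetical parameters; the returned
# initial guesses should lie close to the generating v0, k0 and e0 (kp is set to 4).
def _example_init_bm3():
    import numpy as np
    vv = np.linspace(90., 110., 9)
    en = v_bm3(vv, 100.0, 0.005, 4.0, -1000.0)   # synthetic static energies
    return init_bm3(vv, en)                      # ~[100., 0.005, 4., -1000.]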
# Output the pressure at a given temperature (tt) and volume (vv).
# Kp can be kept fixed (by setting fix=Kp > 0.1)
def pressure(tt,vv,**kwargs):
"""
Computes the pressure at a temperature and volume
Args:
tt: temperature
vv: unit cell volume
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
[ff,veos,err]=bmx_tem(tt,fix=fix_value)
else:
[ff,veos,err]=bmx_tem(tt)
if bm4.flag:
eos=veos[0:4]
return round(bm4.pressure(vv,*eos)*conv/1e-21,3)
else:
eos=veos[0:3]
return round(bm3(vv,*eos)*conv/1e-21,3)
def pressure_dir(tt,vv):
"""
Computes the pressure at a given volume and temperature from
the numerical derivative of the Helmholtz free energy with
respect to the volume (at constant temperature).
Args:
tt: temperature (K)
vv: volume (A^3)
"""
deg=pr.degree_v
if not vd.flag:
vmin=vv-pr.delta_v/2.
vmax=vv+pr.delta_v/2.
else:
vmin=vv-vd.delta/2.
vmax=vv+vd.delta/2.
v_range=np.linspace(vmin,vmax,pr.nump_v)
f_list=np.array([])
for iv in v_range:
fi=free_fit_vt(tt,iv)
f_list=np.append(f_list,fi)
vfit=np.polyfit(v_range,f_list,deg)
vfitder=np.polyder(vfit,1)
press=-1*np.polyval(vfitder,vv)
return press*conv/1e-21
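# Illustrative sketch: the numerical recipe used by pressure_dir (sample F on a
# small volume window, fit a polynomial, take P = -dF/dV), applied to a toy F(V)
# so that it runs without any program data; units and the toy function are arbitrary.
def _example_numerical_pressure(vv=100.0, delta=0.5, nump=9, deg=2):
    import numpy as np
    toy_free = lambda v: 0.002*(v - 100.0)**2 - 1000.0    # hypothetical F(V)
    v_range = np.linspace(vv - delta/2., vv + delta/2., nump)
    fit = np.polyfit(v_range, toy_free(v_range), deg)
    return -1*np.polyval(np.polyder(fit, 1), vv)          # ~0 at the minimum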
def volume_dir(tt,pp,alpha_flag_1=False, alpha_flag_2=False):
"""
Computes the equilibrium volume at a given temperature and pressure
without using an equation of state.
An initial estimation of the volume is however obtained by using
a BM3 EoS, by calling the eos_temp function; such volume is stored
in the v_new variable.
A list of volumes around the v_new value is then built and, for each
value in the list, a pressure is computed by using the pressure_dir
function, and compared to the input pressure to find the volume
at which the two pressures are equal.
A number of parameters are used to control the computation. They are
all defined by the volume-control driver (volume_ctrl). Convenient
values are already set by default, but they can be changed by using
the method volume_ctrl.set_all. Use the info.show method to get such
values under the 'volume driver section'.
"""
vol_opt.on()
if volume_ctrl.kp_fix:
reset_fix()
if tt < volume_ctrl.t_max:
eos_temp(tt,kp_only=True)
else:
eos_temp(volume_ctrl.t_max,kp_only=True)
set_fix(0)
if (alpha_flag_1) and (not alpha_flag_2):
reset_fix()
eos_temp(tt,kp_only=True)
set_fix(0)
vini=new_volume(tt,pp)
v_new=vini[0] # Initial volume from EoS
if volume_ctrl.t_last_flag:
vini=volume_ctrl.v_last
if (tt > volume_ctrl.t_last) & (volume_ctrl.t_last > 10.):
volume_ctrl.t_last_flag=True
volume_ctrl.shift=0.
volume_ctrl.upgrade_shift=False
if not flag_poly.flag:
if flag_fit_warning.value:
print("It is advised to use polynomial fits for 'dir' calculations\n")
fit_status()
print("")
flag_fit_warning.value=False
if flag_poly.flag:
volume_max=max(flag_poly.fit_vol)
volume_min=min(flag_poly.fit_vol)
if flag_spline.flag:
volume_max=max(flag_spline.fit_vol)
volume_min=min(flag_spline.fit_vol)
if flag_poly.flag:
if vini > volume_max:
flag_volume_max.value=True
if flag_volume_warning.value:
flag_volume_warning.value=False
print("Warning: volume exceeds the maximum value set in volume_range")
print("Volume: %8.4f" % vini)
fit_status()
print("")
# return vini
if flag_spline.flag:
if vini > volume_max:
flag_volume_max.value=True
if flag_volume_warning.value:
flag_volume_warning.value=False
print("Warning: volume exceeds the maximum value set in volume_range")
print("Volume: %8.4f" % vini)
fit_status()
print("")
# return vini
vvi=vini
if volume_ctrl.t_last_flag:
if (tt > volume_ctrl.t_last) & (volume_ctrl.t_last > 10.):
vvi=volume_ctrl.v_last
vplot=vvi
v_list=np.linspace(vvi - volume_ctrl.delta/volume_ctrl.left,\
vvi + volume_ctrl.delta/volume_ctrl.right, 24)
else:
if tt > volume_ctrl.t_dump:
volume_ctrl.shift=volume_ctrl.shift/volume_ctrl.dump
v_list=np.linspace(vini[0]-volume_ctrl.shift - volume_ctrl.delta/volume_ctrl.left,\
vini[0]-volume_ctrl.shift + volume_ctrl.delta/volume_ctrl.right, 24)
vplot=vini[0]
p_list=np.array([])
for iv in v_list:
pi=(pressure_dir(tt,iv)-pp)**2
p_list=np.append(p_list,pi)
fitv=np.polyfit(v_list,p_list,volume_ctrl.degree)
pressure=lambda vv: np.polyval(fitv,vv)
min_p=np.argmin(p_list)
vini=[v_list[min_p]]
if volume_ctrl.degree > 2:
bound=[(volume_min, volume_max)]
vmin=minimize(pressure,vini,method='L-BFGS-B', bounds=bound, tol=1e-10,
options={'gtol':1e-10, 'maxiter':500})
shift=v_new-vmin.x[0]
else:
shrink=volume_ctrl.quad_shrink
new_v=np.linspace(vini[0]-volume_ctrl.delta/shrink, vini[0]+volume_ctrl.delta/shrink,8)
new_p=np.array([])
for iv in new_v:
pi=(pressure_dir(tt,iv)-pp)**2
new_p=np.append(new_p,pi)
fit_new=np.polyfit(new_v, new_p,2)
der_new=np.polyder(fit_new,1)
vmin=-1*der_new[1]/der_new[0]
shift=v_new-vmin
if volume_ctrl.upgrade_shift:
volume_ctrl.shift=shift
if volume_ctrl.degree > 2:
if volume_ctrl.debug:
x1=np.mean(v_list)
x2=np.min(v_list)
x=(x1+x2)/2
y=0.95*np.max(p_list)
y2=0.88*np.max(p_list)
y3=0.81*np.max(p_list)
y4=0.74*np.max(p_list)
plt.figure()
title="Temperature: "+str(round(tt,2))+" K"
plt.plot(v_list,p_list)
plt.xlabel("V (A^3)")
plt.ylabel("Delta_P^2 (GPa^2)")
plt.title(title)
v_opt="Opt volume: "+str(vmin.x[0].round(4))
v_min="Approx volume: "+str(vini[0].round(4))
v_new="EoS volume: "+str(v_new.round(4))
v_ini="V_ini volume: "+str(vplot.round(4))
plt.text(x,y,v_opt,fontfamily='monospace')
plt.text(x,y2,v_min, fontfamily='monospace')
plt.text(x,y3,v_new,fontfamily='monospace')
plt.text(x,y4,v_ini,fontfamily='monospace')
plt.show()
else:
if volume_ctrl.debug:
x1=np.mean(v_list)
x2=np.min(v_list)
x=(x1+x2)/2
y=0.95*np.max(p_list)
y2=0.88*np.max(p_list)
y3=0.81*np.max(p_list)
y4=0.74*np.max(p_list)
plt.figure()
title="Temperature: "+str(round(tt,2))+" K"
plt.plot(v_list,p_list)
plt.plot(new_v, new_p,"*")
plt.xlabel("V (A^3)")
plt.ylabel("Delta_P^2 (GPa^2)")
plt.title(title)
v_opt="Opt. volume: "+str(round(vmin,4))
v_min="Approx volume: "+str(vini[0].round(4))
v_new="EoS Volume: "+str(v_new.round(4))
v_ini="V_ini volume: "+str(vplot.round(4))
plt.text(x,y,v_opt,fontfamily='monospace')
plt.text(x,y2,v_min, fontfamily='monospace')
plt.text(x,y3,v_new,fontfamily='monospace')
plt.text(x,y4,v_ini,fontfamily='monospace')
plt.show()
if volume_ctrl.degree > 2:
test=vmin.success
if not test:
print("\n**** WARNING ****")
print("Optimization in volume_dir not converged; approx. volume returned")
print("temperature: %5.2f, Volume: %6.3f" % (tt, vini[0]))
volume_ctrl.v_last=vini[0]
vol_opt.off()
return vini[0]
else:
volume_ctrl.v_last=vini[0]
return vmin.x[0]
else:
volume_ctrl.v_last=vmin
return vmin
def volume_from_F(tt, shrink=10., npoints=60, debug=False):
"""
Computation of the equilibrium volume at any given temperature
and at 0 pressure. The algorithm looks for the minimum of the
Helmholtz function with respect to V (it is equivalent to the
minimization of the Gibbs free energy function as the pressure is
zero). The method is very similar to that implemented in the
more general volume_dir function, but it does not require the
calculation of any derivative of F (to get the pressure).
The Helmholtz free energy is computed by means of the free_fit_vt
function.
Args:
tt: temperature (in K)
npoints: number of points in the V range (centered around an
initial volume computed by the volume_dir function),
where the minimum of F is to be searched (default 60).
shrink: shrinking factor for the definition of the V-range for
the optimization of V (default 10).
debug: plots and prints debug information. If debug=False, only
the optimized value of volume is returned.
Note:
The function makes use of parameters sets by the methods of
the volume_F_ctrl instance of the volume_F_control_class class.
In particular, the initial value of volume computed by the
volume_dir function can be shifted by the volume_F_ctrl.shift
value. This value is set by the volume_F_ctrl.set_shift method
provided that the volume_F_ctrl.upgrade_shift flag is True.
"""
delta=volume_ctrl.delta
d2=delta/2.
vini=volume_dir(tt,0)
if volume_F_ctrl.get_flag():
shift=volume_F_ctrl.get_shift()
vini=vini+shift
v_eos=new_volume(tt,0)[0]
vlist=np.linspace(vini-d2, vini+d2, npoints)
flist=list(free_fit_vt(tt, iv) for iv in vlist)
imin=np.argmin(flist)
vmin=vlist[imin]
vlist2=np.linspace(vmin-d2/shrink, vmin+d2/shrink, 8)
flist2=list(free_fit_vt(tt, iv) for iv in vlist2)
fit=np.polyfit(vlist2,flist2,2)
fitder=np.polyder(fit,1)
vref=-fitder[1]/fitder[0]
fref=np.polyval(fit, vref)
v_shift=vref-vini
if volume_F_ctrl.get_flag() & volume_F_ctrl.get_upgrade_status():
volume_F_ctrl.set_shift(v_shift)
vplot=np.linspace(vref-d2/shrink, vref+d2/shrink, npoints)
fplot=np.polyval(fit, vplot)
if debug:
xt=vlist2.round(2)
title="F free energy vs V at T = "+str(tt)+" K"
plt.figure()
ax=plt.gca()
ax.ticklabel_format(useOffset=False)
plt.plot(vlist2, flist2, "k*", label="Actual values")
plt.plot(vplot, fplot, "k-", label="Quadratic fit")
plt.plot(vref,fref,"r*", label="Minimum from fit")
plt.legend(frameon=False)
plt.xlabel("Volume (A^3)")
plt.ylabel("F (a.u.)")
plt.xticks(xt)
plt.title(title)
plt.show()
print("\nInitial volume from volume_dir: %8.4f" % vini)
print("Volume from EoS fit: %8.4f" % v_eos)
print("Approx. volume at minimum F (numerical): %8.4f" % vmin)
print("Volume at minimum (from fit): %8.4f\n" % vref)
return vref
else:
return vref
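# Illustrative sketch: the core of volume_from_F (coarse search for the minimum of
# F(V), then parabolic refinement around it), written for a toy F(V) so that it is
# self-contained; the quadratic toy function and its minimum at 101.3 are hypothetical.
def _example_min_from_quadratic():
    import numpy as np
    toy_free = lambda v: 0.002*(v - 101.3)**2 - 1000.0    # hypothetical F(V)
    vlist = np.linspace(95., 108., 60)
    vmin = vlist[np.argmin(toy_free(vlist))]              # coarse minimum
    v2 = np.linspace(vmin - 0.5, vmin + 0.5, 8)
    fit = np.polyfit(v2, toy_free(v2), 2)
    der = np.polyder(fit, 1)
    return -der[1]/der[0]                                 # refined minimum (~101.3)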
def volume_from_F_serie(tmin, tmax, npoints, fact_plot=10, debug=False, expansion=False, degree=4,
fit_alpha=False, export=False, export_alpha=False, export_alpha_fit=False):
"""
Volume and thermal expansion (at zero pressure) in a range of temperatures,
computed by the minimization of the Helmholtz free energy function.
Args:
tmin, tmax, npoints: minimum, maximum and number of points defining
the T range
fact_plot: factor used to compute the number of points for the plot
(default 10)
debug: debugging information (default False)
expansion: computation of thermal expansion (default False)
degree: if expansion=True, in order to compute the thermal expansion
a log(V) vs T polynomial fit of degree 'degree' is performed
(default 4)
fit_alpha: thermal expansion is fitted to a power series (default False)
export: list of computed volumes is exported (default False)
export_alpha: list of computed alpha values is exported (default False)
export_alpha_fit: coefficients of the power series fitting the alpha's
are exported
Note:
Thermal expansion is computed from a log(V) versus T polynomial fit
Note:
if export is True, the volume list only is exported (and the function
returns) no matter if expansion is also True (that is, thermal expansion
is not computed). Likewise, if export_alpha is True, no fit of the thermal
expansion data on a power series is performed (and, therefore, such data from
the fit cannot be exported).
Note:
Having exported the coefficients of the power series fitting the alpha values,
they can be uploaded to a particular phase by using the load_alpha method
of the mineral class; e.g. py.load_alpha(alpha_fit, power_a)
Examples:
>>> alpha_fit=volume_from_F_serie(100, 400, 12, expansion=True, fit_alpha=True, export_alpha_fit=True)
>>> py.load_alpha(alpha_fit, power_a)
>>> py.info()
"""
t_list=np.linspace(tmin, tmax, npoints)
v_list=list(volume_from_F(it, debug=debug) for it in t_list)
if export:
return v_list
plt.figure()
plt.plot(t_list, v_list, "k-")
plt.xlabel("T (K)")
plt.ylabel("V (A^3)")
plt.title("Volume vs Temperature at zero pressure")
plt.show()
if expansion:
logv=np.log(v_list)
fit=np.polyfit(t_list, logv, degree)
fitder=np.polyder(fit, 1)
alpha_list=np.polyval(fitder, t_list)
if export_alpha:
return alpha_list
t_plot=np.linspace(tmin, tmax, npoints*fact_plot)
lv_plot=np.polyval(fit, t_plot)
label_fit="Polynomial fit, degree: "+str(degree)
plt.figure()
plt.title("Log(V) versus T")
plt.xlabel("T (K)")
plt.ylabel("Log(V)")
plt.plot(t_list, logv, "k*", label="Actual values")
plt.plot(t_plot, lv_plot, "k-", label=label_fit)
plt.legend(frameon=False)
plt.show()
plt.figure()
plt.title("Thermal expansion")
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.plot(t_list, alpha_list, "k*", label="Actual values")
if fit_alpha:
if not flag_alpha:
print("\nWarning: no polynomial defined for fitting alpha's")
print("Use ALPHA keyword in input file")
else:
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
alpha_value=[]
for ict in t_plot:
alpha_i=alpha_dir_fun(ict,*alpha_fit)
alpha_value=np.append(alpha_value,alpha_i)
plt.plot(t_plot,alpha_value,"k-", label="Power series fit")
plt.legend(frameon=False)
plt.show()
if export_alpha_fit & flag_alpha & fit_alpha:
return alpha_fit
def volume_conversion(vv, atojb=True):
"""
Volume conversion from/to unit cell volume (in A^3) to/from the molar volume
(in J/bar)
Args:
vv: value of volume (in A^3 or J/bar)
atojb: if aotjb is True (default), conversion is from A^3 to J/bar
if atojb is False, conversion is from J/bar to A^3
"""
if atojb:
vv=vv*avo*1e-25/zu
print("Molar volume: %7.4f J/bar" % vv)
else:
vv=vv*zu*1e25/avo
print("Cell volume: %7.4f A^3" % vv)
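# Illustrative sketch: the same conversion used by volume_conversion, written with
# explicit constants: V[J/bar] = V[A^3]*N_A*1e-25/Z, where Z is the number of
# formula units per cell (zu in this program); Z=4 below is only an example value.
def _example_volume_conversion(v_cell_a3=100.0, z=4):
    n_avo = 6.02214076e23
    return v_cell_a3*n_avo*1e-25/z                        # molar volume in J/bar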
def find_temperature_vp(vv,pp, tmin=100., tmax=1000., prt=True):
nt=50
t_list=np.linspace(tmin,tmax,nt)
v_list=list(volume_dir(it,pp) for it in t_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
t_0=t_list[min_diff]
delta=20.
t_min=t_0-delta
t_max=t_0+delta
t_list=np.linspace(t_min,t_max,nt)
v_list=list(volume_dir(it,pp) for it in t_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
t_0f=t_list[min_diff]
if prt:
print("Temperature found:")
print("First guess %5.2f; result: %5.2f K" % (t_0, t_0f))
else:
return t_0f
def find_pressure_vt(vv,tt, pmin, pmax, prt=True):
npp=50
p_list=np.linspace(pmin,pmax,npp)
v_list=list(volume_dir(tt,ip) for ip in p_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
p_0=p_list[min_diff]
delta=0.5
p_min=p_0-delta
p_max=p_0+delta
p_list=np.linspace(p_min,p_max,npp)
v_list=list(volume_dir(tt,ip) for ip in p_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
p_0f=p_list[min_diff]
if prt:
print("Pressure found:")
print("First guess %5.2f; result: %5.2f GPa" % (p_0, p_0f))
else:
return p_0f
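# Illustrative sketch: the coarse-to-fine grid search used by find_temperature_vp
# and find_pressure_vt, applied to a toy monotonic V(x) function so that the
# expected answer (x ~ 100 for a target volume of 101) is known in advance.
def _example_grid_search(target=101.0):
    import numpy as np
    toy_v = lambda x: 100.0 + 0.01*x                      # hypothetical V(T) or V(P)
    x1 = np.linspace(0., 1000., 50)
    x0 = x1[np.argmin((toy_v(x1) - target)**2)]           # first guess
    x2 = np.linspace(x0 - 20., x0 + 20., 50)
    return x2[np.argmin((toy_v(x2) - target)**2)]         # refined value (~100.)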
def bulk_dir(tt,prt=False, out=False, **kwargs):
"""
Optimizes a BM3 EoS from volumes and total pressures at a given
temperature. In turn, phonon pressures are directly computed as volume
derivatives of the Helmholtz function; static pressures are from a V-BM3
fit of E(V) static data.
Negative pressures are excluded from the computation.
Args:
tt: temperature
prt (optional): if True, prints a P(V) list; default: False
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
"""
flag_volume_max.value=False
l_arg=list(kwargs.items())
fixpar=False
flag_serie=False
vol_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if 'serie' == karg_i[0]:
flag_serie=karg_i[1]
if 'volume' == karg_i[0]:
vol_flag=karg_i[1]
[dum,pterm,dum]=bmx_tem(tt)
ini=pterm[0:3]
flag_x=False
if f_fix.flag:
fix=f_fix.value
flag_x=True
p0_f=[ini[0],ini[1]]
if fixpar:
if fix_value < 0.1:
flag_x=False
else:
fix=fix_value
flag_x=True
p0_f=[ini[0],ini[1]]
if flag_spline.flag:
v_list=flag_spline.fit_vol
elif flag_poly.flag:
v_list=flag_poly.fit_vol
else:
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bulk_dir"
print(war1+war2)
return
f_fix_orig=f_fix.flag
volmax=volume_dir(tt,0.)
if flag_volume_max.value:
print("Computation stop. Use set_volume_range to fix the problem")
stop()
volnew=np.append(v_list,volmax)
p_list=np.array([])
for vi in volnew:
pi=pressure_dir(tt,vi)
p_list=np.append(p_list,pi)
v_new=np.array([])
p_new=np.array([])
for iv in zip(volnew,p_list):
if iv[1]>=-0.01:
v_new=np.append(v_new,iv[0])
p_new=np.append(p_new,iv[1])
try:
if flag_x:
pdir, pcov_dir = curve_fit(lambda v_new, v0, k0: \
bm3(v_new, v0, k0, fix), \
v_new, p_new, p0=p0_f, method='dogbox',\
ftol=1e-15, xtol=1e-15)
else:
pdir, pcov_dir = curve_fit(bm3, v_new, p_new, \
method='dogbox', p0=ini[0:3], ftol=1e-15, xtol=1e-15)
perr_t=np.sqrt(np.diag(pcov_dir))
except RuntimeError:
print("EoS optimization did not succeed for t = %5.2f" % tt)
flag_dir.on()
if flag_serie:
return 0,0
else:
return
if flag_x:
pdir=np.append(pdir,fix)
perr_t=np.append(perr_t,0.00)
if flag_serie and vol_flag:
return pdir[0],pdir[1],pdir[2]
if flag_serie:
return pdir[1],pdir[2]
if out:
return pdir[0], pdir[1], pdir[2]
print("\nBM3 EoS from P(V) fit\n")
print("K0: %8.2f (%4.2f) GPa" % (pdir[1],perr_t[1]))
print("Kp: %8.2f (%4.2f) " % (pdir[2],perr_t[2]))
print("V0: %8.4f (%4.2f) A^3" % (pdir[0],perr_t[0]))
info.temp=tt
info.k0=pdir[1]
info.kp=pdir[2]
info.v0=pdir[0]
vol=np.linspace(min(v_new),max(v_new),16)
press=bm3(vol,*pdir)
plt.figure()
plt.title("BM3 fit at T = %5.1f K\n" % tt)
plt.plot(v_new,p_new,"k*")
plt.plot(vol,press,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("Pressure (GPa)")
plt.show()
if not f_fix_orig:
reset_fix()
if prt:
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in zip(v_new,p_new):
print(" %5.3f %5.2f" % (vp_i[0], vp_i[1]))
def bulk_dir_serie(tini, tfin, npoints, degree=2, update=False, **kwargs):
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_serie=np.linspace(tini, tfin, npoints)
tx_serie=np.array([])
b_serie=np.array([])
for ti in t_serie:
flag_dir.off()
if not fixpar:
bi,kpi=bulk_dir(ti,serie=True)
else:
bi,kpi=bulk_dir(ti, serie=True, fix=fix_value)
if not flag_dir.value:
b_serie=np.append(b_serie,bi)
tx_serie=np.append(tx_serie,ti)
else:
pass
t_serie=tx_serie
plt.figure()
plt.plot(t_serie,b_serie,"k*")
plt.title("Bulk modulus (K0)")
plt.xlabel("T(K)")
plt.ylabel("K (GPa)")
plt.title("Bulk modulus as a function of T")
fit_b=np.polyfit(t_serie,b_serie,degree)
b_fit=np.polyval(fit_b,t_serie)
plt.plot(t_serie,b_fit,"k-")
print("\nResults from the fit (from high to low order)")
np.set_printoptions(formatter={'float': '{: 4.2e}'.format})
print(fit_b)
np.set_printoptions(formatter=None)
plt.show()
if update:
return fit_b
volume_ctrl.shift=0.
def bm4_dir(tt,prt=True):
"""
Optimizes a BM4 EoS from volumes and total pressures at a given
temperature. Negative pressures are excluded from the computation.
Args:
tt: temperature
prt (optional): if True, prints a P(V) list; default: True
"""
flag_volume_max.value=False
start_bm4()
if flag_spline.flag:
v_list=flag_spline.fit_vol
elif flag_poly.flag:
v_list=flag_poly.fit_vol
else:
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bm4_dir"
print(war1+war2)
return
volmax=volume_dir(tt,0.)
if flag_volume_max.value:
print("Computation stop. Use set_volume_range to fix the problem")
stop()
volnew=np.append(v_list,volmax)
p_list=np.array([])
for vi in volnew:
pi=pressure_dir(tt,vi)
p_list=np.append(p_list,pi)
v_new=np.array([])
p_new=np.array([])
for iv in zip(volnew,p_list):
if iv[1]>=-0.01:
v_new=np.append(v_new,iv[0])
p_new=np.append(p_new,iv[1])
ini=np.copy(bm4.en_ini[0:4])
ini[1]=ini[1]*conv*1e21
pdir, pcov_dir = curve_fit(bm4.pressure, v_new, p_new, \
p0=ini, ftol=1e-15, xtol=1e-15)
perr_t=np.sqrt(np.diag(pcov_dir))
print("\nBM4 EoS from P(V) fit\n")
print("K0: %8.2f (%4.2f) GPa" % (pdir[1],perr_t[1]))
print("Kp: %8.2f (%4.2f) " % (pdir[2],perr_t[2]))
print("Kpp: %8.2f (%4.2f) " % (pdir[3], perr_t[3]))
print("V0: %8.4f (%4.2f) A^3" % (pdir[0],perr_t[0]))
vol=np.linspace(min(v_new),max(v_new),16)
press=bm4.pressure(vol,*pdir)
plt.figure()
plt.title("BM4 fit at T = %5.1f K\n" % tt)
plt.plot(v_new,p_new,"k*")
plt.plot(vol,press,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("Pressure (GPa)")
plt.show()
if prt:
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in zip(v_new,p_new):
print(" %5.3f %5.2f" % (vp_i[0], vp_i[1]))
def bulk_modulus_p(tt,pp,noeos=False,prt=False,**kwargs):
"""
Bulk modulus at a temperature and pressure
Args:
tt: temperature
pp: pressure
noeos: to compute pressures, the bm3 EoS is used if
noeos=False (default); otherwise the EoS is
used only for the static part, and vibrational
pressures are obtained from the derivative
of the F function (pressure_dir function)
prt: if True, results are printed
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1. This is relevant
if noeos=False
The values are computed through the direct derivative -V(dP/dV)_T.
Since the computation of pressure requires the bmx_tem function
(if noeos=False), Kp can be kept fixed by setting fix=Kp > 0.1
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if not noeos:
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)[0]
else:
vol=new_volume(tt,pp)[0]
else:
vol=volume_dir(tt,pp)
if not vd.flag:
delta=pr.delta_v
else:
delta=vd.delta
numv=pr.nump_v
degree=pr.degree_v
v_range=np.linspace(vol-delta/2.,vol+delta/2.,numv)
press_range=[]
for iv in v_range:
if not noeos:
if fixpar:
p_i=pressure(tt,iv,fix=fix_value)
else:
p_i=pressure(tt,iv)
else:
p_i=pressure_dir(tt,iv)
press_range=np.append(press_range,p_i)
press_fit=np.polyfit(v_range,press_range,degree)
b_poly=np.polyder(press_fit,1)
b_val=np.polyval(b_poly,vol)
b_val=(-1*b_val*vol)
if prt:
eos=str(noeos)
print("Bulk Modulus at T = %5.1f K and P = %3.1f GPa, noeos = %s: %6.3f GPa, V = %6.3f " %\
(tt,pp,eos,b_val, vol))
else:
b_val=round(b_val,3)
return b_val, vol
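# Illustrative sketch: the finite-difference scheme of bulk_modulus_p, applied to a
# BM3 P(V) with hypothetical parameters expressed directly in GPa; the result can be
# checked against K = K0 + Kp*P.
def _example_bulk_from_derivative(vol=95.0, delta=1.0, numv=9, degree=2):
    import numpy as np
    v0, k0_gpa, kp = 100.0, 120.0, 4.0                    # hypothetical EoS (K0 in GPa)
    v_range = np.linspace(vol - delta/2., vol + delta/2., numv)
    p_range = bm3(v_range, v0, k0_gpa, kp)                # P already in GPa here
    fit = np.polyfit(v_range, p_range, degree)
    return -1*vol*np.polyval(np.polyder(fit, 1), vol)     # K_T in GPa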
def bulk_modulus_p_serie(tini, tfin, nt, pres, noeos=False, fit=False, type='poly', \
deg=2, smooth=5, out=False, **kwargs):
"""
Computes the bulk modulus from the definition K=-V(dP/dV)_T in a range
of temperature values
Args:
tini: lower temperature in the range
tfin: higher temperature in the range
nt: number of points in the [tini, tfin] range
pres: pressure (GPa)
noeos: see note below
fit: if True, a fit of the computed K(T) values is performed
type: type of the fit ('poly', or 'spline')
deg: degree of the fit
smooth: smooth parameter for the fit; relevant if type='spline'
out: if True, the parameters of the K(T) and V(T) fits are printed
Keyword Args:
fix: if fix is provided, Kp is kept fixed at the fix value
Relevant if noeos=False
Note:
if noeos=False, the pressure at any given volume is calculated
from the equation of state. If noeos=True, the pressure is computed
as the first derivative of the Helmholtz function (at constant
temperature)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_list=np.linspace(tini, tfin, nt)
b_l=np.array([])
t_l=np.array([])
v_l=np.array([])
if fixpar:
for it in t_list:
ib, v_val=bulk_modulus_p(it,pres,noeos=noeos,fix=fix_value)
if vol_opt.flag:
b_l=np.append(b_l,ib)
t_l=np.append(t_l,it)
v_l=np.append(v_l,v_val)
else:
for it in t_list:
ib,v_val=bulk_modulus_p(it,pres,noeos=noeos)
if vol_opt.flag:
t_l=np.append(t_l,it)
b_l=np.append(b_l,ib)
v_l=np.append(v_l,v_val)
if fit:
t_fit=np.linspace(tini,tfin,50)
if type=='poly':
fit_par=np.polyfit(t_l,b_l,deg)
b_fit=np.polyval(fit_par,t_fit)
fit_par_v=np.polyfit(t_l,v_l,deg)
v_fit=np.polyval(fit_par_v,t_fit)
elif type=='spline':
fit_par=UnivariateSpline(t_l,b_l,k=deg,s=smooth)
b_fit=fit_par(t_fit)
fit_par_v=UnivariateSpline(t_l,v_l,k=deg,s=0.1)
v_fit=fit_par_v(t_fit)
method='poly'
if type=='spline':
method='spline'
lbl=method+' fit'
plt.figure()
plt.plot(t_l,b_l,"k*",label='Actual values')
if fit:
plt.plot(t_fit, b_fit,"k-",label=lbl)
plt.xlabel("Temperature (K)")
plt.ylabel("K (GPa)")
tlt="Bulk modulus at pressure "+str(pres)
plt.title(tlt)
plt.legend(frameon=False)
plt.show()
reset_fix()
if out & fit:
return fit_par, fit_par_v
def bulk_modulus_adiabat(tt,pp,noeos=False, prt=True,**kwargs):
"""
Adiabatic bulk modulus at a temperature and pressure
Args:
tt: temperature
pp: pressure
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1
The values are computed through the direct derivative -V(dP/dV)_T.
Since the computation of pressure requires the bmx_tem function,
Kp can be kept fixed by setting fix=Kp > 0.1
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)[0]
alpha,kt_dum,pr=thermal_exp_v(tt,vol,False,fix=fix_value)
kt,_=bulk_modulus_p(tt,pp,noeos=noeos,fix=fix_value)
ent,cv=entropy_v(tt,vol,False,False,fix=fix_value)
else:
vol=new_volume(tt,pp)[0]
alpha,kt_dum,pr=thermal_exp_v(tt,vol,False)
kt,_=bulk_modulus_p(tt,pp,noeos=noeos)
ent,cv=entropy_v(tt,vol,False,False)
volm=(vol*avo*1e-30)/zu
ks=kt*(1+volm*(tt*1e9*kt*alpha**2)/cv)
if prt:
print("\nAdiabatic bulk modulus Ks: %5.2f GPa" % ks)
print("Isoth. Kt: %5.2f GPa, alpha: %5.2e K^-1, sp. heat Cv: %6.2f J/mol K"\
% (kt, alpha, cv))
print("Cell volume: %6.2f A^3, molar volume %6.2f cm^3" % (vol, 1e6*volm))
else:
return ks
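# Illustrative sketch: the thermodynamic relation used at the end of
# bulk_modulus_adiabat, Ks = Kt*(1 + Vm*T*Kt*alpha^2/Cv), with Vm the molar volume
# in m^3/mol and the 1e9 factor converting Kt from GPa to Pa; all numbers below
# are hypothetical.
def _example_adiabatic_bulk(kt_gpa=120.0, alpha=2.5e-5, cv=80.0, vm=4.0e-5, tt=300.0):
    return kt_gpa*(1. + vm*tt*1e9*kt_gpa*alpha**2/cv)     # Ks in GPa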
def static(plot=False, vmnx=[0., 0.]):
"""
Static EoS
Args:
plot: plot of the E(V) curve
vmnx: array of two reals [vmin and vmax]; vmin is the
minimum volume and vmax is the maximum volume.
If vmin and vmax are both 0., the whole V range
is used (as specified in the static energies file).
Default=[0., 0.]
Note:
The volume range can also be modified by using the methods
of the static_volume class
Examples:
>>> static_volume.set(100., 120.)
>>> static_volume.on()
>>> static(plot=True)
Computes the static EoS in the [100., 120.] volume range. The same
is obtained with
>>> static(plot=True, vmnx=[100., 120.])
However, with the first method the defined volume range is recorded for
future computations; by using the second method, the volume range is reset
to the original one, once the fit is performed.
"""
global pcov
if flag_err:
return None
vol_flag=False
if static_range.flag:
vol_min=static_range.vmin
vol_max=static_range.vmax
vol_flag=True
else:
if (vmnx[0] > 0.1) or (vmnx[1] > 0.1):
vol_flag=True
vol_min=vmnx[0]
vol_max=vmnx[1]
if vol_flag:
vol_select=(volume >= vol_min) & (volume <= vol_max)
vol_selected=volume[vol_select]
energy_selected=energy[vol_select]
if not vol_flag:
popt, pcov = curve_fit(v_bm3, volume, energy, p0=ini,ftol=1e-15,xtol=1e-15)
else:
popt, pcov = curve_fit(v_bm3, vol_selected, energy_selected, p0=ini,ftol=1e-15,xtol=1e-15)
k_gpa=popt[1]*conv/1e-21
kp=popt[2]
v0=popt[0]
perr=np.sqrt(np.diag(pcov))
ke=perr[1]*conv/1e-21
print("\nStatic BM3 EoS")
print("\nBulk Modulus: %5.2f (%4.2f) GPa" % (k_gpa, ke))
print("Kp: %5.2f (%4.2f)" % (kp, perr[2]))
print("V0: %5.4f (%4.2f) A^3" % (v0, perr[0]))
print("E0: %5.8e (%4.2e) hartree" % (popt[3], perr[3]))
if vol_flag:
print("\nStatic EoS computed in a restricted volume range:")
print(vol_selected)
print("\n")
info.k0_static=k_gpa
info.kp_static=kp
info.v0_static=v0
info.popt=popt
info.popt_orig=popt
vd.set_delta(v0)
vol_min=np.min(volume)
vol_max=np.max(volume)
nvol=50
vol_range=np.linspace(vol_min,vol_max,nvol)
if plot:
plt.figure(0)
plt.title("E(V) static BM3 curve")
plt.plot(volume,energy,"*")
plt.plot(vol_range, v_bm3(vol_range, *popt), 'b-')
plt.ylabel("Static energy (a.u.)")
plt.xlabel("V (A^3)")
plt.show()
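# Illustrative sketch: the fitting step of static() reduced to its essentials; a
# V-integrated BM3 is fitted with scipy's curve_fit to synthetic E(V) points
# generated from v_bm3 itself, so the optimized parameters should recover the
# (hypothetical) generating values.
def _example_static_fit():
    import numpy as np
    from scipy.optimize import curve_fit
    v0, k0, kp, e0 = 100.0, 0.005, 4.0, -1000.0           # hypothetical "true" EoS
    vols = np.linspace(92., 108., 11)
    ens = v_bm3(vols, v0, k0, kp, e0)
    popt, pcov = curve_fit(v_bm3, vols, ens, p0=[95., 0.004, 4., -999.])
    return popt                                           # ~[v0, k0, kp, e0]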
def p_static(nvol=50, v_add=[], e_add=[]):
"""
Computes a static BM3-EoS from a P/V set of data. Data (cell volumes in A^3 and
pressures in GPa) must be contained in a file whose name must be specified
in the input file (together with the energy, in hartree, at the equilibrium
static volume).
Args:
nvol: number of volume points for the graphical output (default 50)
v_add / e_add: lists of volume/energy data to be plotted together
with the E/V curve from the V-EoS fit. Such added
points are not used in the fit (no points added as default)
Note:
This function provides static data for the calculation of the static
contribution to the Helmholtz free energy. It is an alternative to
the fit of the static E/V data performed by the 'static' function.
"""
add_flag=False
if v_add != []:
add_flag=True
p_data=np.loadtxt(data_p_file)
pres_gpa=p_data[:,1]
vs=p_data[:,0]
pres=pres_gpa*1e-21/conv
pstat, cstat = curve_fit(bm3, vs, pres, p0=ini[0:3],ftol=1e-15,xtol=1e-15)
info.popt=pstat
info.popt=np.append(info.popt,static_e0)
k_gpa=info.popt[1]*conv/1e-21
kp=info.popt[2]
v0=info.popt[0]
info.k0_static=k_gpa
info.kp_static=kp
info.v0_static=v0
print("\nStatic BM3 EoS")
print("\nBulk Modulus: %5.2f GPa" % k_gpa)
print("Kp: %5.2f " % kp )
print("V0: %5.4f A^3" % v0)
print("E0: %5.8e hartree" % info.popt[3])
vol_min=np.min(vs)
vol_max=np.max(vs)
ps=info.popt[0:3]
vol_range=np.linspace(vol_min,vol_max,nvol)
p_GPa=bm3(vol_range, *ps)*conv/1e-21
plt.figure(0)
plt.title("P(V) static BM3 curve")
plt.plot(vs,pres_gpa,"*")
plt.plot(vol_range, p_GPa, 'b-')
plt.ylabel("Pressure (GPa)")
plt.xlabel("V (A^3)")
plt.show()
p_stat.flag=True
p_stat.vmin=np.min(vs)
p_stat.vmax=np.max(vs)
p_stat.pmin=np.min(pres_gpa)
p_stat.pmax=np.max(pres_gpa)
p_stat.npoints=vs.size
p_stat.k0=k_gpa
p_stat.kp=kp
p_stat.v0=v0
p_stat.e0=static_e0
energy_static=v_bm3(vol_range, *info.popt_orig)
energy_pstatic=v_bm3(vol_range, *info.popt)
delta=energy_pstatic-energy_static
select=(volume >= vol_min) & (volume <= vol_max)
vv=volume[select]
ee=energy[select]
plt.figure()
plt.plot(vol_range, energy_static, "k-", label="STATIC case")
plt.plot(vol_range, energy_pstatic, "k--", label="PSTATIC case")
plt.plot(vv,ee,"k*", label="Original E(V) data")
if add_flag:
plt.plot(v_add, e_add, "r*", label="Not V-BM3 fitted data")
plt.legend(frameon=False)
plt.xlabel("Volume (A^3)")
plt.ylabel("E (hartree)")
plt.title("E(V) curves")
plt.show()
plt.figure()
plt.plot(vol_range,delta,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("E (hartree)")
plt.title("Pstatic and static energy difference")
plt.show()
delta=abs(delta)
mean=delta.mean()
mean_j=mean*conv*avo/zu
std=delta.std()
imx=np.argmax(delta)
mx=delta[imx]
vx=vol_range[imx]
print("Mean discrepancy: %6.3e hartree (%5.1f J/mole)" % (mean, mean_j))
print("Standard deviation: %4.1e hartree" % std)
print("Maximum discrepancy %6.3e hartree for a volume of %6.2f A^3" % (mx, vx))
def static_pressure_bm3(vv):
"""
Outputs the static pressure (in GPa) at the volume (vv)
Args:
vv: volume
"""
static(plot=False)
k0=info.popt[1]
kp=info.popt[2]
v0=info.popt[0]
p_static_bm3=bm3(vv,v0, k0,kp)
ps=p_static_bm3*conv/1e-21
print("Static pressure at the volume: %4.2f" % ps)
def start_bm4():
bm4.on()
bm4.estimates(volume,energy)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bm4p, bm4c = curve_fit(bm4.energy, volume, energy, \
method='dogbox', p0=bm4.en_ini,ftol=1e-15,xtol=1e-15,gtol=1e-15)
bm4.store(bm4p)
bm4.upgrade()
bm4.upload(bm4p)
bm4_k=bm4p[1]*conv/1e-21
kp=bm4p[2]
kpp=bm4p[3]
v0=bm4p[0]
print("\nStatic BM4-EoS")
print("\nBulk Modulus: %5.2f GPa" % bm4_k)
print("Kp: %5.2f " % kp)
print("Kpp: %5.2f " % kpp)
print("V0: %8.4f A^3" % v0)
print("\n")
plt.figure()
# bm4e=np.array([])
vbm4=np.linspace(min(volume),max(volume),50)
bm4e=bm4.energy(vbm4,*bm4.bm4_static_eos)
plt.plot(vbm4,bm4e,"k-")
plt.plot(volume,energy,"k*")
plt.title("Static Energy: BM4 fit")
plt.xlabel("V (A^3)")
plt.ylabel("Static energy (a.u.)")
plt.show()
def free(temperature):
"""
Computes the Helmholtz free energy (hartree) at a given temperature
Args:
temperature: temperature (in K) at which the computation is done
Note:
1. ei is the static energy
2. enz_i is the zero point energy
3. fth_i is thermal contribution to the Helmholtz free energy
4. tot_i is the total Helmholtz free energy
Note:
This is a direct calculation that avoids the fit of a polynomial
to the frequencies. No FITVOL in input.txt
Note:
If kieffer.flag is True, the contribution from acoustic branches
is taken into account, by following the Kieffer model.
"""
energy_tot=[]
for ivol in int_set:
vol_i=data_vol_freq_orig[ivol]
if bm4.flag:
ei=bm4.energy(vol_i,*bm4.bm4_static_eos)
else:
ei=v_bm3(vol_i, *info.popt)
enz_i=0.
fth_i=0.
eianh=0.
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
freq_i=lo.data_freq[ifreq,ivol+1]
if freq_i >= 0.:
fth_i=fth_i+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/temperature))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz_i=enz_i+deg[ifreq]*freq_i*ez_fact
evib_i=enz_i+fth_i*kb*temperature/conv+eianh
tot_i=ei+evib_i
energy_tot=np.append(energy_tot,tot_i)
if kieffer.flag:
free_k=kieffer.get_value(temperature)
free_k=free_k/(avo*conv)
energy_tot=energy_tot+free_k
return energy_tot
def free_fit(temperature):
"""
Computes the Helmholtz free energy (in hartree) at a given temperature
Args:
temperature: temperature (in K)
Note:
1. ei is the static energy
2. enz_i is the zero point energy
3. fth_i is thermal contribution to the Helmholtz free energy
4. tot_i is the total Helmholtz free energy
Note:
This computation makes use of polynomials fitted
to the frequencies of each vibrational mode, as
functions of volume. It is activated by the keyword
FITVOL in the input.txt file
Note:
Possible contributions from anharmonicity (keyword ANH in the input
file) or from a modified Kieffer model (keyword KIEFFER in the input file)
are included. NO contribution from DISP modes is considered (phonon dispersion
from a supercell calculation).
Note: the volumes at which the free energy refers are defined in the fit_vol
list
"""
energy_tot=[]
eianh=0.
if flag_spline.flag:
fit_vol=flag_spline.fit_vol
elif flag_poly.flag:
fit_vol=flag_poly.fit_vol
for ivol in fit_vol:
if bm4.flag:
ei=bm4.energy(ivol,*bm4.bm4_static_eos)
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
else:
ei=v_bm3(ivol,*info.popt)
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
enz_i=0.
fth_i=0.
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
if not flag_spline.flag:
freq_i=freq_v_fun(ifreq,ivol)
else:
freq_i=freq_spline_v(ifreq,ivol)
if freq_i >= 0.:
fth_i=fth_i+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/temperature))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz_i=enz_i+deg[ifreq]*freq_i*ez_fact
evib_i=enz_i+fth_i*kb*temperature/conv+eianh
tot_i=ei+evib_i
energy_tot=np.append(energy_tot,tot_i)
if kieffer.flag:
free_k=kieffer.get_value(temperature)
free_k=free_k/(avo*conv)
energy_tot=energy_tot+free_k
return energy_tot
def free_fit_vt(tt,vv):
"""
Computes the Helmholtz free energy at a given pressure and volume.
Free energy is computed by addition of several contributions:
(1) static contribution from a volume-integrated BM3 EoS
(2) vibrational contribution from optical vibrational modes
(3) vibrational contribution from phonon dispersion (supercell calculations)
(4) vibrational contribution from acoustic modes (modified Kieffer model)
(5) vibrational contribution from anharmonic mode(s)
Contributions (1) and (2) are always included; contributions (3) and (4)
are mutually exclusive and are respectively activated by the keywords
DISP and KIEFFER in the input file; anharmonic contributions (5) are activated
by the keyword ANH in the input file.
Args:
tt: temperature (K)
vv: volume (A^3)
"""
e_static=v_bm3(vv,*info.popt)
enz=0
fth=0
eianh=0.
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,vv,tt)*anharm.wgt[im]
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
if not flag_spline.flag:
freq_i=freq_v_fun(ifreq,vv)
else:
freq_i=freq_spline_v(ifreq,vv)
if freq_i >= 0.:
fth=fth+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/tt))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz=enz+deg[ifreq]*freq_i*ez_fact
tot_no_static=enz+fth*kb*tt/conv+eianh
tot=e_static+tot_no_static
if kieffer.flag:
free_k=kieffer.get_value(tt)
free_k=free_k/(avo*conv)
tot=tot+free_k
if disp.flag and (disp.eos_flag or disp.thermo_vt_flag):
if not disp.fit_vt_flag:
disp.free_fit_vt()
print("\n**** INFORMATION ****")
print("The V,T-fit of the phonon dispersion surface was not prepared")
print("it has been perfomed with default values of the relevant parameters")
print("Use the disp.free_fit_vt function to redo with new parameters\n")
disp_l=disp.free_vt(tt,vv)
free_f=(tot_no_static+disp_l)/(disp.molt+1)
tot=e_static+free_f
return tot
def eos_temp_range(vmin_list, vmax_list, npp, temp):
"""
EoS computed for different volumes ranges
Args:
vmin_list: list of minimum volumes
vmax_list: list of maximum volumes
npp: number of points in each V-range
temp: temperature
Note:
vmin_list and vmax_list must be lists of the same length
"""
final=np.array([])
size=len(vmin_list)
for vmin, vmax in zip(vmin_list,vmax_list):
v_list=np.linspace(vmin,vmax,npp)
free_list=np.array([])
for iv in v_list:
ifree=free_fit_vt(temp, iv)
free_list=np.append(free_list,ifree)
pterm, pcov_term = curve_fit(v_bm3, v_list, free_list, \
p0=ini, ftol=1e-15, xtol=1e-15)
k_gpa=pterm[1]*conv/1e-21
k_gpa_err=np.sqrt(pcov_term[1][1])*conv/1e-21   # standard error on K0 (GPa)
pmax=pressure(temp,vmin)
pmin=pressure(temp,vmax)
final=np.append(final, [vmin, vmax, round(pmax,1), round(pmin,1), round(pterm[0],4), round(k_gpa,2), \
round(pterm[2],2)])
final=final.reshape(size,7)
final=final.T
pd.set_option('colheader_justify', 'center')
df=pd.DataFrame(final, index=['Vmin','Vmax','Pmax','Pmin','V0','K0','Kp'])
df=df.T
print("\nBM3-EoS computed for different volume ranges")
print("Temperature: %6.1f K" % temp)
print("")
print(df.to_string(index=False))
def g_vt_dir(tt,pp,**kwargs):
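"""
Computes the Gibbs free energy (J/mol) at a given temperature and pressure,
referred to the value at 298.15 K and 0.0001 GPa: G = F + PV, with F from
free_fit_vt and volumes from volume_dir.
Args:
tt: temperature (K)
pp: pressure (GPa)
Keyword Args:
g0: experimental G at 298.15 K and 1 bar; if provided, the output is
anchored so that the computed value at the reference conditions equals g0
v0: experimental molar volume (J/bar) at the reference conditions; if
provided, the PV term is rescaled by the ratio between v0 and the
computed reference volume
"""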
flag_volume_max.value=False
l_arg=list(kwargs.items())
v0_flag=False
g0_flag=False
for karg_i in l_arg:
if 'g0' == karg_i[0]:
g0_flag=True
gexp=karg_i[1]
elif 'v0' == karg_i[0]:
v0_flag=True
v0_value=karg_i[1]
vol0=volume_dir(298.15,0.0001)
fact=1.
if v0_flag:
fact=(1e25*v0_value*zu/avo)/vol0
gref=free_fit_vt(298.15,vol0)*conv*avo/zu + 0.0001*vol0*fact*avo*1e-21/zu
if g0_flag:
gref=gref-gexp
vv=volume_dir(tt,pp)
if flag_volume_max.value:
flag_volume_max.inc()
if flag_volume_max.jwar < 2:
print("Warning g_vt_dir: volume exceeds maximum set in volume_range")
free_f=free_fit_vt(tt,vv)
gtv=(avo/zu)*(free_f*conv) + (avo/zu)*pp*vv*fact*1e-21
return gtv-gref
def entropy_v(tt,vv, plot=False, prt=False, **kwargs):
"""
Entropy and specific heat at constant volume
Args:
tt: temperature
vv: volume
plot (optional): (default False) plots free energy vs T for checking
possible numerical instabilities
prt (optional): (default False) prints formatted output
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Returns:
if prt=False (default) outputs the entropy and the specific heat
at constant volume (unit: J/mol K). if prt=True, a formatted
output is printed and the function provides no output
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
if delta_ctrl.adaptive:
delta=delta_ctrl.get_delta(tt)
else:
delta=delta_ctrl.get_delta()
maxv=max(data_vol_freq)
free_f=[]
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
t_range=np.linspace(min_t,max_t,nump)
for i_t in t_range:
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(i_t,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(i_t)
if (pterm[0]>maxv):
if flag_warning.value:
print("\nWarning: volume out of range; reduce temperature")
flag_warning.off()
flag_warning.inc()
if bm4.flag:
f1=bm4.energy(vv,*pterm)
else:
f1=v_bm3(vv,*pterm)
free_f=np.append(free_f,f1)
if disp.flag:
disp_l=[]
disp.free_fit(disp.temp,vv,disp=False)
for i_t in t_range:
if not disp.thermo_vt_flag:
idf=disp.free_func(i_t)
else:
idf=disp.free_vt(i_t,vv)
disp_l=np.append(disp_l,idf)
free_f=(free_f+disp_l)/(disp.molt+1)
if plot:
plt.figure(4)
plt.plot(t_range,free_f,"*")
plt.title("F free energy (a.u.)")
plt.show()
fit=np.polyfit(t_range,free_f,degree)
der1=np.polyder(fit,1)
der2=np.polyder(fit,2)
entropy=-1*np.polyval(der1,tt)*conv*avo/zu
cv=-1*np.polyval(der2,tt)*tt*conv*avo/zu
if prt:
print("\nEntropy: %7.2f J/mol K" % entropy)
print("Specific heat (at constant volume): %7.2f J/mol K" % cv)
return None
else:
return entropy, cv
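# Illustrative sketch: the numerical core of entropy_v (sample F on a small T window,
# fit a polynomial, use S = -dF/dT and Cv = -T*d2F/dT2), applied to a toy F(T) already
# expressed in J/mol so that no unit conversion is needed; the hypothetical function
# below gives S = 80 and Cv = 30 J/(mol K) at 300 K.
def _example_entropy_cv(tt=300.0, delta=10.0, nump=11, degree=3):
    import numpy as np
    toy_free = lambda t: -1.0e5 - 50.0*t - 0.05*t**2      # hypothetical F(T), J/mol
    t_range = np.linspace(tt - delta/2., tt + delta/2., nump)
    fit = np.polyfit(t_range, toy_free(t_range), degree)
    s = -1*np.polyval(np.polyder(fit, 1), tt)             # entropy, J/(mol K)
    cv = -1*tt*np.polyval(np.polyder(fit, 2), tt)         # specific heat, J/(mol K)
    return s, cv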
def entropy_dir_v(tt, vv, prt=False):
"""
Computation of the entropy at a given volume by means of the free_fit_vt
function. The method is EoS free and automatically includes contributions
from optic modes, off-center modes and anharmonic modes.
Args:
tt: temperature (K)
vv: cell volume (A^3)
prt: detailed output
Note:
In case phonon dispersion is included, the disp.thermo_vt mode
must be activated. The function checks this and, if needed, activates
such mode.
"""
if disp.flag:
if not disp.thermo_vt_flag:
print("Warning: disp.thermo_vt activation")
disp.thermo_vt_on()
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
if delta_ctrl.adaptive:
delta=delta_ctrl.get_delta(tt)
else:
delta=delta_ctrl.get_delta()
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
free_f=np.array([])
t_range=np.linspace(min_t,max_t,nump)
for it in t_range:
ifree=free_fit_vt(it,vv)
free_f=np.append(free_f, ifree)
free_fit=np.polyfit(t_range, free_f, degree)
free_der1=np.polyder(free_fit,1)
free_der2=np.polyder(free_fit,2)
entropy=-1*np.polyval(free_der1,tt)*conv*avo/zu
cv=-1*np.polyval(free_der2,tt)*tt*conv*avo/zu
if prt:
print("\nEntropy: %7.2f J/mol K" % entropy)
print("Specific heat (at constant volume): %7.2f J/mol K" % cv)
return None
else:
return entropy, cv
def entropy_p(tt,pp,plot=False,prt=True, dir=False, **kwargs):
"""
Entropy and specific heat at constant volume at selected temperature
and pressure
Args:
tt: temperature
pp: pressure
plot (optional): (default False) plots free energy vs T for checking
possible numerical instabilities
prt (optional): (default True) prints formatted output
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Returns:
if prt=False, outputs the entropy and the specific heat at constant
volume (J/mol K); if prt=True (default),
a formatted output is printed and the function returns None
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
if dir:
vol=volume_dir(tt,pp)
ent_v=entropy_dir_v(tt, vol, prt)
else:
ent_v=entropy_v(tt,vol,plot,prt,fix=fix_value)
else:
vol=new_volume(tt,pp)
if dir:
vol=volume_dir(tt,pp)
ent_v=entropy_dir_v(tt, vol, prt)
else:
ent_v=entropy_v(tt,vol,plot,prt)
if prt:
print("Pressure: %5.2f GPa; Volume %8.4f A^3" % (pp, vol))
return None
else:
return ent_v
def thermal_exp_v(tt,vv,plot=False,**kwargs):
"""
Thermal expansion at a given temperature and volume
Args:
tt: temperature
vv: volume
plot (optional): (default False) plots pressure vs T for checking
possible numerical instabilities
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Returns:
thermal expansion (K^-1), bulk modulus (GPa) and pressure (GPa)
at given temperature=tt and volume=vv
Notes:
The value is obtained by calculating (dP/dT)_V divided by K
where K=K0+K'*P; P is obtained by the BM3 EoS whose parameters
(at temperatures in the range "t_range") are refined by fitting
the free energy F(V,T) curves. The different pressures calculated
(at constant vv) for different T in t_range, are then fitted by a
polynomial of suitable degree ("degree" variable) which is then
differentiated analytically at the temperature tt, to get (dP/dT)_V
If "fix" > 0.1, the BM3 fitting is done by keeping kp fixed at the
value "fix".
The function outputs the thermal expansion (in K^-1), the bulk
modulus [at the pressure P(vv,tt)] and the pressure (in GPa)
If the boolean "plot" is True (default False), a plot of P as a
function of T is shown, over the range t_range
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
delta=delta_ctrl.get_delta()
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
maxv=max(data_vol_freq)
pressure=[]
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
t_range=np.linspace(min_t,max_t,nump)
for ict in t_range:
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(ict,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(ict)
if bm4.flag:
f1=bm4.pressure(vv,pterm[0],pterm[1],pterm[2],pterm[3])*\
conv/1e-21
else:
f1=bm3(vv,pterm[0],pterm[1],pterm[2])*conv/1e-21
pressure=np.append(pressure,f1)
if (pterm[0]>maxv):
if flag_warning.value:
print("\nWarning: volume out of range; reduce temperature")
flag_warning.off()
flag_warning.inc()
if plot:
plt.figure(5)
plt.plot(t_range,pressure,"*")
plt.title("Pressure (GPa)")
plt.show()
fit=np.polyfit(t_range,pressure,degree)
der1=np.polyder(fit,1)
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(tt,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(tt)
if bm4.flag:
pressure=bm4.pressure(vv,pterm[0],pterm[1],pterm[2],pterm[3])*\
conv/1e-21
else:
pressure=bm3(vv,pterm[0],pterm[1],pterm[2])*conv/1e-21
k=(pterm[1]*conv/1e-21)+pterm[2]*pressure
return np.polyval(der1,tt)/k,k,pressure
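# Illustrative sketch: the relation exploited by thermal_exp_v, alpha = (dP/dT)_V / K_T,
# computed numerically for a toy P(T) at constant volume and a hypothetical K_T of
# 120 GPa; the expected result is 3.0e-3/120 = 2.5e-5 K^-1.
def _example_alpha_from_pt(tt=300.0, delta=20.0, nump=9, degree=2, kt_gpa=120.0):
    import numpy as np
    toy_p = lambda t: 1.0 + 3.0e-3*t                      # hypothetical P(T) at fixed V (GPa)
    t_range = np.linspace(tt - delta/2., tt + delta/2., nump)
    fit = np.polyfit(t_range, toy_p(t_range), degree)
    dp_dt = np.polyval(np.polyder(fit, 1), tt)            # (dP/dT)_V in GPa/K
    return dp_dt/kt_gpa                                   # alpha in K^-1 (~2.5e-5)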
def thermal_exp_p(tt,pp,plot=False,exit=False,**kwargs):
"""
Thermal expansion at given temperature and pressure, based on
the computation of K*alpha product.
Args:
tt: temperature
pp: pressure
plot (optional): plots pressure vs T values (see help to
the thermal_exp_v function)
exit: if True, the alpha value is returned without formatting (default False)
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Note:
see help for the thermal_exp_v function
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot,fix=fix_value)
else:
vol=new_volume(tt,pp)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot)
if exit:
return alpha
else:
print("\nThermal expansion: %6.2e K^-1" % alpha)
print("Bulk modulus: %6.2f GPa" % k)
print("Pressure: %6.2f GPa" % pressure)
print("Volume: %8.4f A^3\n" % vol)
def alpha_serie(tini,tfin,npoint,pp,plot=False,prt=True, fit=True,HTlim=0.,\
degree=1, save='', g_deg=1, tex=False, title=True, **kwargs):
"""
Thermal expansion in a temperature range, at a given pressure (pp),
and (optional) fit with a polynomium whose powers are specified
in the input.txt file
Note:
The computation is performed by using the thermal_exp_v function
that is based on the evaluation of the K*alpha product (for details,
see the documentation of the thermal_exp_v function).
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if HTlim > 0.:
alpha_limit=grun_therm_serie(tini,tfin,npoint=12,HTlim=HTlim,degree=degree,\
g_deg=g_deg, ex=True)
t_range=np.linspace(tini,tfin,npoint)
alpha_serie=[]
for ict in t_range:
if fixpar:
vol=new_volume(ict,pp,fix=fix_value)
[alpha_i,k,pressure]=thermal_exp_v(ict,vol,plot,fix=fix_value)
else:
vol=new_volume(ict,pp)
[alpha_i,k,pressure]=thermal_exp_v(ict,vol,plot)
alpha_serie=np.append(alpha_serie,alpha_i)
if HTlim > 0:
t_range=np.append(t_range,HTlim)
alpha_serie=np.append(alpha_serie,alpha_limit)
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
fig=plt.figure(10)
ax=fig.add_subplot(111)
ax.plot(t_range,alpha_serie,"k*")
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
if latex.flag:
ax.set_xlabel("T (K)", fontsize=fontsize)
ax.set_ylabel(r'$\alpha$ (K$^{-1}$)', fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
ax.set_xlabel("T (K)")
ax.set_ylabel("Alpha (K^-1)")
if title:
plt.title("Thermal expansion")
if prt:
serie=(t_range, alpha_serie)
df=pd.DataFrame(serie,index=['Temp.','alpha'])
df=df.T
print("\n")
df['alpha']=df['alpha'].map('{:,.3e}'.format)
df['Temp.']=df['Temp.'].map('{:,.2f}'.format)
print(df.to_string(index=False))
if fit:
if flag_alpha==False:
print("\nWarning: no polynomial defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_fun,t_range,alpha_serie,p0=coef_ini)
tvfin=tfin
if HTlim > 0:
tvfin=HTlim
t_value=np.linspace(tini,tvfin,pr.ntemp_plot_cp)
alpha_value=[]
for ict in t_value:
alpha_i=alpha_fun(ict,*alpha_fit)
alpha_value=np.append(alpha_value,alpha_i)
plt.plot(t_value,alpha_value,"k-")
if save !='':
plt.savefig(fname=path+'/'+save,dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
if prt:
return None
elif fit:
return alpha_fit
else:
return None
def alpha_fun(tt,*coef):
"""
Outputs the thermal expansion at a given temperature, from
the fit obtained with the alpha_serie function
"""
alpha=0.
jc=0
while jc<lpow_a:
alpha=alpha+coef[jc]*(tt**power_a[jc])
jc=jc+1
return alpha
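# Illustrative sketch: alpha_fun evaluates alpha(T) = sum_j coef[j]*T**power_a[j],
# with the exponents power_a read from the ALPHA keyword of the input file; the same
# sum is written here explicitly for a hypothetical set of exponents and coefficients.
def _example_alpha_power_series(tt=300.0):
    powers = [0, -1, -2]                                  # hypothetical exponents
    coefs = [2.0e-5, 1.0e-3, -0.05]                       # hypothetical coefficients
    return sum(c*tt**p for c, p in zip(coefs, powers))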
def dalpha_dt(tt,pp,**kwargs):
"""
Outputs the derivative of alpha with respect to T
at constant pressure. It is used by dCp_dP
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
delta=pr.delta_alpha
nump=pr.nump_alpha
degree=pr.degree_alpha
alpha=[]
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
t_range=np.linspace(min_t,max_t,nump)
for ict in t_range:
if fixpar:
alpha_i=thermal_exp_p(ict,pp,fix=fix_value,exit=True)
else:
alpha_i=thermal_exp_p(ict,pp,exit=True)
alpha=np.append(alpha,alpha_i)
fit=np.polyfit(t_range,alpha,degree)
dfit=np.polyder(fit,1)
return np.polyval(dfit,tt)
def alpha_dir(tt,pp):
"""
Calculation of the thermal expansion at a given temperature and
pressure. The computation is done by following the definition of
alpha, as alpha=1/V (dV/dT)_P.
Args:
tt: temperature (K)
pp: pressure (GPa)
Note:
The calculation of the volume at a given temperature is done
by the volume_dir function
"""
dt=delta_ctrl.get_delta()
nt=delta_ctrl.get_nump()
dt2=dt/2.
deg=delta_ctrl.get_degree()
alpha_opt.on()
v0=volume_dir(tt,pp,alpha_flag_1=True, alpha_flag_2=False)
if not vol_opt.flag:
alpha_opt.off()
t_list=np.linspace(tt-dt2, tt+dt2, nt)
vl=np.array([])
tl=np.array([])
for it in t_list:
iv=volume_dir(it,pp,alpha_flag_1=True, alpha_flag_2=True)
if vol_opt.flag:
vl=np.append(vl,iv)
tl=np.append(tl,it)
fit=np.polyfit(tl,vl,deg)
fit_d=np.polyder(fit,1)
alpha=np.polyval(fit_d,tt)
alpha=alpha/v0
return alpha
def alpha_dir_v(tmin, tmax, nt=12, type='spline', deg=4, smooth=0.001, comp=False, fit=False, trim=0., phase=''):
"""
Computes thermal expansion from the derivative of a V(T) function
calculated on a generally large T range.
Args:
tmin: minimum temperature
tmax: maximum temperature
nt: number of T points in the range (default 12)
type: if 'spline' (default), a spline fit of the V(T) values is performed;
otherwise a polynomial fit is chosen.
deg: degree of the spline (or polynomial) fit of the V(T) values (default 4)
smooth: smoothness parameter of the spline fit (default 0.001);
relevant if type='spline'
comp: if True, the thermal expansions from other methods
are also computed and plotted (default False)
fit: if True, a power series fit is performed and its parameters are returned
trim: if trim > 0. and fit=True, the power series fit is done over the
[tmin, tmax-trim] T-range, to avoid possible fitting problems at the end of the
high-temperature interval
phase: if not empty and fit=True, uploads the coefficients of the
power series fit for the selected phase (default '')
Note:
The spline fit is performed on the Log(V) values; the derivative
of this fit is then directly the thermal expansion, since alpha = d Log(V)/dT
Note:
the volume at each temperature is computed by using the volume_dir function
Note:
If no phase is selected, the parameters of the power series fit can still
be uploaded: save the output of the alpha_dir_v function in a variable,
then pass that variable to the load_alpha method of the mineral class.
"""
print("\nSummary of the input parameters\n")
print("T range: %5.1f, %5.1f K, Num. of points: %4i" % (tmin, tmax, nt))
if type=='spline':
print("Type of Log(V) fit: %s, degree: %2i, smooth: %5.4f" % (type, deg, smooth))
else:
print("Type of Log(V) fit: %s, degree: %2i" % (type, deg))
print("Compare with other methods to compute alpha: %s" % comp)
print("Fit alpha values to a power serie: %s" % fit)
if fit:
print("Trim applied to T and alpha values for the power serie fit: %5.1f" % trim)
if phase != '':
print("Power serie coefficient uploaded for the phase %s" % phase)
print("")
t_list=np.linspace(tmin, tmax, nt)
v_list=np.array([])
# internal flag: complete calculation if all the three flags
# are set to True.
# flag[0]: calculation from volume_dir
# flag[1]: calculation from EoS
# flag[2]: calculation from volume_from_F
flag=[True, True, True]
for it in t_list:
iv=volume_dir(it,0)
v_list=np.append(v_list,iv)
if comp:
al_list=np.array([])
therm_list=np.array([])
if flag[0]:
for it in t_list:
ial=alpha_dir(it,0)
al_list=np.append(al_list, ial)
if flag[1]:
if f_fix.flag:
reset_fix()
for it in t_list:
ith=thermal_exp_p(it,0., exit=True)[0]
therm_list=np.append(therm_list, ith)
if flag[2]:
alpha_from_F=volume_from_F_serie(tmin, tmax, nt, expansion=True, debug=False,\
export_alpha=True)
v_log=np.log(v_list)
if type=='spline':
v_log_fit=UnivariateSpline(t_list, v_log, k=deg, s=smooth)
alpha_fit=v_log_fit.derivative()
alpha_calc=alpha_fit(t_list)
else:
v_log_fit=np.polyfit(t_list, v_log, deg)
alpha_fit=np.polyder(v_log_fit,1)
alpha_calc=np.polyval(alpha_fit, t_list)
t_plot=np.linspace(tmin,tmax, nt*10)
if type=='spline':
v_log_plot=v_log_fit(t_plot)
alpha_plot=alpha_fit(t_plot)
else:
v_log_plot=np.polyval(v_log_fit, t_plot)
alpha_plot=np.polyval(alpha_fit, t_plot)
if fit:
t_trim=np.copy(t_list)
alpha_trim=np.copy(alpha_calc)
if trim > 0.1:
trim_idx=(t_trim < (tmax-trim))
t_trim=t_list[trim_idx]
alpha_trim=alpha_trim[trim_idx]
coef_ini=np.ones(lpow_a)
fit_al,_=curve_fit(alpha_dir_fun,t_trim,alpha_trim,p0=coef_ini)
alpha_fit_plot=list(alpha_dir_fun(it, *fit_al) for it in t_plot)
plt.figure()
plt.plot(t_list, v_log,"k*", label="Actual Log(V) values")
plt.plot(t_plot, v_log_plot, "k-", label="Spline fit")
plt.xlabel("T (K)")
plt.ylabel("Log(V)")
plt.xlim(tmin, tmax)
plt.title("Log(V) vs T")
plt.legend(frameon=False)
plt.show()
plt.figure()
plt.plot(t_plot, alpha_plot, "k-", label="From V(T) fit")
if comp:
if flag[2]:
plt.plot(t_list, alpha_from_F, "ko", label="From Volume_from_F")
if flag[0]:
plt.plot(t_list, al_list, "k*", label="From definition (dir)")
if flag[1]:
plt.plot(t_list, therm_list, "k+", label="From (dP/dT)_V and EoS")
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.xlim(tmin, tmax)
plt.legend(frameon=False)
plt.title("Thermal expansion")
plt.show()
if fit:
plt.figure()
plt.plot(t_list, alpha_calc, "k*", label="Actual values")
plt.plot(t_plot, alpha_fit_plot, "k-", label="Power serie fit")
plt.xlabel("T (K)")
plt.xlim(tmin, tmax)
plt.ylabel("Alpha (K^-1)")
plt.legend(frameon=False)
plt.title("Alpha: power serie fit")
plt.show()
if comp & flag[0] & flag[1] & flag[2]:
fmt="{:4.2e}"
fmt2="{:11.4f}"
fmt3="{:6.1f}"
alpha_calc=list(fmt.format(ia) for ia in alpha_calc)
al_list=list(fmt.format(ia) for ia in al_list)
therm_list=list(fmt.format(ia) for ia in therm_list)
alpha_from_F=list(fmt.format(ia) for ia in alpha_from_F)
v_list=list(fmt2.format(iv) for iv in v_list)
t_list=list(fmt3.format(it) for it in t_list)
serie=(t_list,v_list,alpha_calc,alpha_from_F,al_list,therm_list)
df=pd.DataFrame(serie,\
index=[' Temp',' V ',' (1) ',' (2) ', ' (3) ', ' (4) '])
df=df.T
print("")
print(df.to_string(index=False))
print("")
print("(1) from V(T) fit")
print("(2) from V(T) from F fit")
print("(3) from the definition ('dir' computation)")
print("(4) From (dP/dT)_V and EoS")
else:
fmt="{:4.2e}"
fmt2="{:11.4f}"
fmt3="{:6.1f}"
alpha_calc=list(fmt.format(ia) for ia in alpha_calc)
v_list=list(fmt2.format(iv) for iv in v_list)
t_list=list(fmt3.format(it) for it in t_list)
serie=(t_list,v_list,alpha_calc)
df=pd.DataFrame(serie,\
index=[' Temp',' V ',' Alpha'])
df=df.T
print("")
print(df.to_string(index=False))
if fit and (phase != ''):
print("")
eval(phase).load_alpha(fit_al, power_a)
eval(phase).info()
if fit and (phase == ''):
return fit_al
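# --- Illustrative sketch (not part of the original module) ----------------
# alpha_dir_v exploits the identity alpha = (1/V)(dV/dT)_P = d Log(V)/dT, so
# the derivative of a spline fitted to Log(V) is directly the thermal
# expansion. A minimal, self-contained check on synthetic data (constant
# alpha0, hence V(T) = V0*exp(alpha0*T)); the numbers are arbitrary.
def _log_volume_spline_demo():
    import numpy as np
    from scipy.interpolate import UnivariateSpline
    alpha0, v0 = 3.0e-5, 100.0                 # assumed synthetic values
    t = np.linspace(300., 1300., 21)
    v = v0 * np.exp(alpha0 * t)
    spl = UnivariateSpline(t, np.log(v), k=4, s=0.)
    alpha = spl.derivative()(t)                # should recover ~alpha0 everywhere
    return np.allclose(alpha, alpha0, rtol=1e-3)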
def alpha_dir_serie(tmin, tmax, nt, pp, fit=True, prt=True):
"""
Thermal expansion in a given range of temperatures. The computation
is done by using the alpha_dir function that, in turn, makes use
of the volume_dir function (EoS-free computation of the volume at
a given pressure and temperature).
Args:
tmin, tmax, nt: minimum, maximum temperatures (K) and number of points
in the T-range
pp: pressure (GPa)
fit: if True, a power series fit of the alpha values is performed
(see ALPHA keyword in the input file)
prt: if True, a detailed output is printed.
"""
t_list=np.linspace(tmin,tmax,nt)
t_l=np.array([])
alpha_l=np.array([])
for it in t_list:
ial=alpha_dir(it,pp)
if alpha_opt.flag:
alpha_l=np.append(alpha_l,ial)
t_l=np.append(t_l,it)
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_l,alpha_l,p0=coef_ini)
if fit:
t_list=np.linspace(tmin,tmax,nt*4)
alpha_fit_c=alpha_dir_fun(t_list,*alpha_fit)
fig=plt.figure()
ax = fig.add_subplot(111)
ax.plot(t_l,alpha_l,"k*")
if fit:
ax.plot(t_list, alpha_fit_c,"k-")
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.title("Thermal expansion")
plt.show()
if prt:
fmt1="{:5.1f}"
fmt2="{:4.2e}"
t_l=list(fmt1.format(it) for it in t_l)
alpha_l=list(fmt2.format(ia) for ia in alpha_l)
serie=(t_l, alpha_l)
df=pd.DataFrame(serie,index=['Temp.',' Alpha '])
df=df.T
print("\n")
print(df.to_string(index=False))
print("")
volume_ctrl.shift=0.
if fit:
return alpha_fit
def alpha_dir_fun(tt,*coef):
"""
Outputs the thermal expansion at a given temperature, from
the fit obtained with the alpha_dir_serie function
"""
alpha=0.
jc=0
while jc<lpow_a:
alpha=alpha+coef[jc]*(tt**power_a[jc])
jc=jc+1
return alpha
def alpha_dir_from_dpdt(tt, pp, prt=False):
"""
Computes thermal expansion, at any temperature and pressure, from the
K*alpha product, by using 'dir' functions only (no equation of state
involved at any step). In particular, the required (dP/dT)_V derivative
is calculated from pressures obtained by the pressure_dir function; the
volume and the bulk modulus at T, P are obtained by means of the
bulk_modulus_p function (with noeos=True)
Args:
tt: temperature (K)
pp: pressure (GPa)
prt: if True, alpha, K and V are printed; otherwise unformatted values
are returned (default False)
"""
bulk, vol=bulk_modulus_p(tt, pp, noeos=True, prt=False)
delta=delta_ctrl.get_delta()
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
delta=delta/2.
t_list=np.linspace(tt-delta, tt+delta, nump)
pressure_list=np.array([])
for it in t_list:
ip=pressure_dir(it, vol)
pressure_list=np.append(pressure_list, ip)
fit=np.polyfit(t_list, pressure_list, degree)
fitder=np.polyder(fit,1)
k_alpha=np.polyval(fitder, tt)
alpha=k_alpha/bulk
if prt:
print("Thermal expansion: %6.2e (K^-1)" % alpha)
print("Bulk modulus: %6.2f (GPa) " % bulk)
print("Volume: %8.4f (A^3) " % vol)
else:
return alpha, bulk, vol
def alpha_dir_from_dpdt_serie(tmin, tmax, nt=12, pp=0, fit=False, phase='',
save=False, title=True, tex=False):
"""
Thermal expansion in a T-range. The function makes use of the
alpha_dir_from_dpdt function.
Args:
tmin, tmax: minimum and maximum temperature (in K)
nt: number of points in the T-range (default 12)
pp: pressure (GPa)
fit: if True, a power series fit is performed
phase: if not equal to '', and fit is True, the coefficients
of the power series fit are uploaded in the internal database
(default '')
save: if True, a figure is saved in a file (default False)
tex: if True, latex format is used for the figure (default False)
title: if False, the title printing is suppressed (default True)
Note:
If a phase is specified and fit is True, use the export function to
upload the parameters of the power series in the database file
Example:
>>> alpha_dir_from_dpdt_serie(100, 500, fit=True, phase='py')
>>> export('py')
"""
t_list=np.linspace(tmin, tmax, nt)
alpha_list=np.array([])
for it in t_list:
ia,_,_=alpha_dir_from_dpdt(it, pp, prt=False)
alpha_list=np.append(alpha_list, ia)
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
if fit:
t_plot=np.linspace(tmin,tmax,nt*4)
alpha_fit_plot=alpha_dir_fun(t_plot,*alpha_fit)
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
plt.figure()
tit_text="Thermal expansion at pressure "+str(pp)+" GPa"
plt.plot(t_list, alpha_list, "k*", label="Actual values")
if fit:
plt.plot(t_plot, alpha_fit_plot, "k-", label="Power series fit")
if latex.flag:
plt.xlabel("T (K)", fontsize=fontsize)
plt.ylabel(r'$\alpha$ (K$^{-1}$)', fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
if fit:
plt.legend(frameon=False, prop={'size': fontsize})
if title:
plt.suptitle(tit_text, fontsize=fontsize)
else:
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
if fit:
plt.legend(frameon=False)
if title:
plt.title(tit_text)
if save:
name=path+'/'+'alpha_from_dpdt.'+ext
plt.savefig(name, dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
if fit and (phase != ''):
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
elif fit:
return alpha_fit
def cp_dir(tt,pp, prt=False):
"""
Computes the specific heat at constant pressure by using 'dir' functions.
In particular, at T and P, the equilibrium volume, the entropy, the specific
heat at constant volume and the thermal expansion are calculated by respectively
using the volume_dir, the entropy_dir_v and the alpha_dir_from_dpdt functions;
bulk modulus is evaluated by means of the bulk_modulus_p function with the
option noeos set to True (the volume and bulk modulus values are from the
alpha_dir_from_dpdt function output, too).
Args:
tt: temperature (K)
pp: pressure (GPa)
prt: if True a detailed output is printed
"""
if disp.flag:
if not disp.thermo_vt_flag:
disp.thermo_vt_on()
alpha, k, vol=alpha_dir_from_dpdt(tt,pp, prt=False)
ent,cv=entropy_dir_v(tt, vol)
cp=cv+vol*(avo*1e-30/zu)*tt*k*1e9*alpha**2
if prt:
print("Cp: %6.2f, Cv: %6.2f, S %6.2f (J/K mol)" % (cp, cv, ent))
print("K: %6.2f (GPa), Alpha: %6.2e (K^-1), Volume: %8.4f (A^3)" % (k, alpha, vol))
else:
return cp
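# --- Illustrative sketch (not part of the original module) ----------------
# The anharmonic correction used above, Cp = Cv + V*T*K_T*alpha^2, needs V in
# m^3/mol and K_T in Pa to give J/(mol K); in the code this is done by the
# factors (avo*1e-30/zu) [A^3 per cell -> m^3 per mole of formula units] and
# 1e9 [GPa -> Pa]. A standalone numeric check with assumed, order-of-magnitude
# values (not taken from any specific phase):
def _cp_correction_demo(vol_a3=110., zu_cell=4, tt=300., k_gpa=170., alpha=2.5e-5, cv=80.):
    avo_ = 6.02214e23
    v_mol = vol_a3 * 1e-30 * avo_ / zu_cell      # m^3 per mole of formula units
    corr = v_mol * tt * k_gpa * 1e9 * alpha**2   # J/(mol K)
    return cv + corr                             # Cp in J/(mol K)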
def cp_dir_serie(tmin, tmax, nt, pp=0):
t_list=np.linspace(tmin, tmax, nt)
cp_list=np.array([cp_dir(it, pp) for it in t_list])
plt.figure()
plt.plot(t_list, cp_list, "k-")
plt.show()
def cp(tt,pp,plot=False,prt=False,dul=False,**kwargs):
"""
Specific heat at constant pressure (Cp) and entropy (S)
Args:
tt: temperature
pp: pressure
fix (optional): optimizes Kp if fix=0, or keeps Kp
fixed if fix=Kp > 0
plot (optional): checks against numerical issues
(experts only)
prt (optional): prints formatted results
Note:
Cp = Cv + V*T*K*alpha^2
Cp, Cv (J/mol K), Cp/Cv, alpha (K^-1), K=K0+K'P (GPa)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
pr_e=False
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
[ent,cv]=entropy_v(tt,vol,plot,pr_e,fix=fix_value)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot,fix=fix_value)
else:
vol=new_volume(tt,pp)
[ent,cv]=entropy_v(tt,vol,plot,pr_e)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot)
cp=cv+vol*(avo*1e-30/zu)*tt*k*1e9*alpha**2
if prt:
print("\nCp: %6.2f, Cv: %6.2f, Cp/Cv: %7.5f, alpha: %6.3e, K: %6.2f\n"\
% (cp, cv, cp/cv, alpha, k))
return None
elif dul == False:
return cp[0], ent
else:
return cp[0],ent,cp/cv
def cp_fun(tt,*coef):
"""
Computes the specific heat at constant pressure, at a given temperature
from the fit Cp(T) performed with the cp_serie function
"""
cp=0.
jc=0
while jc<lpow:
cp=cp+coef[jc]*(tt**power[jc])
jc=jc+1
return cp
def dcp_dp(tt,pp,**kwargs):
"""
Derivative of Cp with respect to P (at T constant)
Args:
tt: temperature
pp: pressure
fix (optional): fixed Kp value; if fix=0., Kp is
optimized
Notes:
The derivative is evaluated from the relation
(dCp/dP)_T = -VT[alpha^2 + (d alpha/dT)_P]
It is **strongly** advised to keep Kp fixed (Kp=fix)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
dalpha=dalpha_dt(tt,pp,fix=fix_value)
alpha,k,pres=thermal_exp_v(tt,vol,fix=fix_value,plot=False)
else:
vol=new_volume(tt,pp)
dalpha=dalpha_dt(tt,pp)
alpha,k,pres=thermal_exp_v(tt,vol,plot=False)
dcp=-1*(vol*avo*1e-21/zu)*tt*(alpha**2+dalpha)
print("\n(dCp/dP)_T: %5.2f J/(mol K GPa) " % dcp)
print("(dAlpha/dT)_P: %6.2e K^-2 " % dalpha)
def compare_exp(graph_exp=True, unit='j' ,save="",dpi=300,**kwargs):
"""
Compare experimental with computed data for Cp and S;
makes a plot of the data
Args:
graph_exp: if True, a plot of Cp vs T is produced
unit: unit of measure of experimental data; allowed values are 'j' or
'cal' (default 'j')
save: file name to save the plot (no file written by default)
dpi: resolution of the image (if 'save' is given)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if (unit == 'j') or (unit == 'J'):
conv_f=1.
elif (unit == 'cal') or (unit == 'CAL'):
conv_f=4.184
else:
print("Warning: unit %s is unknow. J is assumed" % unit)
conv_f=1.
if not flag_exp:
print("Warning: experimental data file not found")
return
t_list=data_cp_exp[:,0]
cp_exp_list=data_cp_exp[:,1]*conv_f
s_exp_list=data_cp_exp[:,2]*conv_f
cp_calc=[]
s_calc=[]
for ti in t_list:
if fixpar:
cp_i, ent_i=cp(ti,0.,fix=fix_value,plot=False,prt=False)
else:
cp_i, ent_i=cp(ti,0.,plot=False,prt=False)
cp_calc=np.append(cp_calc,cp_i)
s_calc=np.append(s_calc, ent_i)
cp_diff=cp_calc-cp_exp_list
s_diff=s_calc-s_exp_list
exp_serie=(t_list,cp_exp_list,cp_calc,cp_diff,s_exp_list,s_calc,\
s_diff)
df= | pd.DataFrame(exp_serie,\
index=['Temp','Cp exp','Cp calc','Del Cp','S exp','S calc','Del S']) | pandas.DataFrame |
#%%
# Our numerical workhorses
import numpy as np
import pandas as pd
import itertools
# Import libraries to parallelize processes
from joblib import Parallel, delayed
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
# Seaborn, useful for graphics
import seaborn as sns
# Pickle is useful for saving outputs that are computationally expensive
# to obtain every time
import pickle
import os
import glob
import git
# Import the project utils
import ccutils
#%%
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Read MaxEnt distributions
print('Reading MaxEnt distributions')
df_maxEnt_mRNA = pd.read_csv(
f"{homedir}/data/csv_maxEnt_dist/MaxEnt_Lagrange_mult_mRNA.csv"
)
# Define dictionaries to map operator to binding energy and rbs to rep copy
op_dict = dict(zip(["O1", "O2", "O3"], [-15.3, -13.9, -9.7]))
rbs_dict = dict(
zip(
["HG104", "RBS1147", "RBS446", "RBS1027", "RBS1", "RBS1L"],
[22, 60, 124, 260, 1220, 1740],
)
)
# Define sample space
mRNA_space = np.arange(0, 100)
protein_space = np.array([0])
# Group df_maxEnt by operator and repressor copy number
df_group = df_maxEnt_mRNA.groupby(["operator", "repressor"])
# Define column names for data frame
names = ["operator", "binding_enery", "repressor", "channcap"]
# Initialize data frame to save channel capacity computations
df_channcap = pd.DataFrame(columns=names)
# Define function to compute in parallel the channel capacity
def cc_parallel_mRNA(df_lagrange):
# Build mRNA transition matrix
Qmc = ccutils.channcap.trans_matrix_maxent(
df_lagrange,
mRNA_space,
protein_space,
True
)
# Compute the channel capacity with the Blahut-Arimoto algorithm
cc_m, _, _ = ccutils.channcap.channel_capacity(Qmc.T, epsilon=1e-4)
# Extract operator and repressor copy number
op = df_lagrange.operator.unique()[0]
eRA = df_lagrange.binding_energy.unique()[0]
rep = df_lagrange.repressor.unique()[0]
return [op, eRA, rep, cc_m]
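# --- Illustrative sketch (not the ccutils implementation) ------------------
# channel_capacity above is documented as a Blahut-Arimoto computation. A
# generic, minimal version of that algorithm for a transition matrix Q whose
# rows are the conditional distributions P(output | input) looks roughly like:
def _blahut_arimoto_sketch(Q, epsilon=1e-4, max_iter=10000):
    import numpy as np
    Q = np.asarray(Q, dtype=float)
    p = np.ones(Q.shape[0]) / Q.shape[0]       # uniform starting input distribution
    cap_nats = 0.0
    for _ in range(max_iter):
        q = p @ Q                              # induced output distribution
        with np.errstate(divide="ignore", invalid="ignore"):
            kl = np.where(Q > 0, Q * np.log(Q / q), 0.0).sum(axis=1)
        d = np.exp(kl)                         # exp of D(Q[i, :] || q) for each input i
        lower, upper = np.log(p @ d), np.log(d.max())
        p = p * d / (p @ d)                    # Blahut-Arimoto update of the input distribution
        cap_nats = lower
        if upper - lower < epsilon:            # standard capacity bound convergence test
            break
    return cap_nats / np.log(2), p             # capacity in bits, optimal input distribution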
# Run the function in parallel
print('Running Blahut-Arimoto algorithm in multiple cores')
ccaps = Parallel(n_jobs=6)(
delayed(cc_parallel_mRNA)(df_lagrange)
for group, df_lagrange in df_group
)
# Convert to tidy data frame
ccaps = | pd.DataFrame(ccaps, columns=names) | pandas.DataFrame |
import sys
import time
import math
import warnings
import numpy as np
import pandas as pd
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from fmlc.triggering import triggering
from fmlc.baseclasses import eFMU
from fmlc.stackedclasses import controller_stack
class testcontroller1(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init= False
self.output['c'] = self.input['a'] * self.input['b']
return 'testcontroller1 did a computation!'
class testcontroller2(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(0.2)
return 'testcontroller2 did a computation!'
class testcontroller3(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(1)
return 'testcontroller3 did a computation!'
class testcontroller4(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(10)
return 'testcontroller4 did a computation!'
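# --- Illustrative sketch (not part of the test suite) ----------------------
# The test controllers above follow the eFMU pattern: inputs are written into
# the .input dict, compute() fills .output and returns a log message. Used
# directly, outside a controller_stack, that looks like:
def _efmu_direct_use_demo():
    ctrl = testcontroller1()
    ctrl.input['a'], ctrl.input['b'] = 10, 4
    msg = ctrl.compute()                 # -> 'testcontroller1 did a computation!'
    return ctrl.output['c'], msg         # -> (40, ...)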
def test_sampletime():
'''This tests if the sample time is working properly'''
controller = {}
controller['forecast1'] = {'function': testcontroller1, 'sampletime': 3}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True)
now = time.time()
while time.time() - now < 10:
controller.query_control(time.time())
df = pd.DataFrame(controller.log_to_df()['forecast1'])
assert df.shape[0] == 5
for i in (np.diff(df.index) / np.timedelta64(1, 's'))[1:]:
assert(math.isclose(i, 3, rel_tol=0.01))
def test_normal():
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller2, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller3, 'sampletime':2}
controller['forecast3'] = {'function':testcontroller1, 'sampletime': 1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=2, workers=100)
controller.run_query_control_for(5)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 7
assert df2.shape[0] == 4
assert df3.shape[0] == 7
assert df4.shape[0] == 7
assert df5.shape[0] == 7
# Check contents of records
assert pd.isna(df1['a'][0])
assert pd.isna(df1['b'][0])
assert pd.isna(df1['c'][0])
assert pd.isna(df2['a'][0])
assert pd.isna(df2['b'][0])
assert pd.isna(df2['c'][0])
assert pd.isna(df3['a'][0])
assert pd.isna(df3['b'][0])
assert pd.isna(df3['c'][0])
assert pd.isna(df4['a'][0])
assert pd.isna(df4['b'][0])
assert pd.isna(df4['c'][0])
assert pd.isna(df5['a'][0])
assert pd.isna(df5['b'][0])
assert pd.isna(df5['c'][0])
assert list(df1['a'])[1:] == [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
assert list(df1['b'])[1:] == [4.0, 4.0, 4.0, 4.0, 4.0, 4.0]
assert list(df1['c'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df2['a'])[1:] == [20.0, 20.0, 20.0]
assert list(df2['b'])[1:] == [4.0, 4.0, 4.0]
assert list(df2['c'])[1:] == [80.0, 80.0, 80.0]
assert list(df3['a'])[1:] == [30.0, 30.0, 30.0, 30.0, 30.0, 30.0]
assert list(df3['b'])[1:] == [4.0, 4.0, 4.0, 4.0, 4.0, 4.0]
assert list(df3['c'])[1:] == [120.0, 120.0, 120.0, 120.0, 120.0, 120.0]
assert list(df4['a'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df4['b'])[1:] == [10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
assert list(df4['c'])[1:] == [400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
assert list(df5['a'])[1:] == [400.0, 400.0, 400.0, 400.0, 400.0, 400.0]
assert list(df5['b'])[1:] == [40.0, 40.0, 40.0, 40.0, 40.0, 40.0]
assert list(df5['c'])[1:] == [16000.0, 16000.0, 16000.0, 16000.0, 16000.0, 16000.0]
assert list(df1['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df2['logging']) == ['Initialize', 'testcontroller3 did a computation!', 'testcontroller3 did a computation!', 'testcontroller3 did a computation!']
assert list(df3['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df4['logging']) == ['Initialize', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!', 'testcontroller2 did a computation!']
assert list(df5['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
def test_stuckController():
'''This tests if the timeout controllers can be caught'''
## CASE1: mpc1 stuck
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller4, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller1, 'sampletime':1}
controller['forecast3'] = {'function':testcontroller1, 'sampletime':1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=0.5, workers=100)
# Catch warning.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
controller.run_query_control_for(2)
assert len(w) == 3
assert "timeout" in str(w[-1].message)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 4
assert df2.shape[0] == 4
assert df3.shape[0] == 4
#assert df4.shape[0] == 1
assert df5.shape[0] == 1
#assert len(df4.columns) == 1
assert len(df5.columns) == 1
# Check contents of records
assert pd.isna(df1['a'][0])
assert pd.isna(df1['b'][0])
assert pd.isna(df1['c'][0])
assert pd.isna(df2['a'][0])
assert pd.isna(df2['b'][0])
assert pd.isna(df2['c'][0])
assert pd.isna(df3['a'][0])
assert pd.isna(df3['b'][0])
assert pd.isna(df3['c'][0])
assert list(df1['a'])[1:] == [10.0, 10.0, 10.0]
assert list(df1['b'])[1:] == [4.0, 4.0, 4.0]
assert list(df1['c'])[1:] == [40.0, 40.0, 40.0]
assert list(df2['a'])[1:] == [20.0, 20.0, 20.0]
assert list(df2['b'])[1:] == [4.0, 4.0, 4.0]
assert list(df2['c'])[1:] == [80.0, 80.0, 80.0]
assert list(df3['a'])[1:] == [30.0, 30.0, 30.0]
assert list(df3['b'])[1:] == [4.0, 4.0, 4.0]
assert list(df3['c'])[1:] == [120.0, 120.0, 120.0]
assert list(df1['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df2['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
assert list(df3['logging']) == ['Initialize', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!', 'testcontroller1 did a computation!']
#assert list(df4['logging']) == ['Initialize']
assert list(df5['logging']) == ['Initialize']
##CASE2: mpc1 and forcast2 stuck
controller = {}
controller['forecast1'] = {'function':testcontroller1, 'sampletime':1}
controller['mpc1'] = {'function':testcontroller3, 'sampletime':'forecast1'}
controller['control1'] = {'function':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'function':testcontroller3, 'sampletime':1}
controller['forecast3'] = {'function':testcontroller1, 'sampletime':1}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=0.8, workers=100)
#Catch Warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
controller.run_query_control_for(5)
assert len(w) == 12
for m in w:
assert "timeout" in str(m.message)
df1 = pd.DataFrame(controller.log_to_df()['forecast1'])
df2 = pd.DataFrame(controller.log_to_df()['forecast2'])
df3 = pd.DataFrame(controller.log_to_df()['forecast3'])
df4 = pd.DataFrame(controller.log_to_df()['mpc1'])
df5 = pd.DataFrame(controller.log_to_df()['control1'])
# Check number of records
assert df1.shape[0] == 7
#assert df2.shape[0] == 1
assert df3.shape[0] == 7
#assert df4.shape[0] == 1
assert df5.shape[0] == 1
#assert len(df2.columns) == 1
#assert len(df4.columns) == 1
assert len(df5.columns) == 1
# Check contents of records
assert pd.isna(df1['a'][0])
assert pd.isna(df1['b'][0])
assert pd.isna(df1['c'][0])
assert pd.isna(df3['a'][0])
assert | pd.isna(df3['b'][0]) | pandas.isna |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import json
import time
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
from numpy import newaxis
from keras.layers import Dense, Activation, Dropout, LSTM
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from math import pi,sqrt,exp,pow,log
from numpy.linalg import det, inv
from abc import ABCMeta, abstractmethod
from sklearn import cluster
import statsmodels.api as sm
import scipy.stats as scs
import scipy.optimize as sco
import scipy.interpolate as sci
from scipy import stats
# In[3]:
df = pd.read_csv("/Users/william/Downloads/DP-LSTM-Differential-Privacy-inspired-LSTM-for-Stock-Prediction-Using-Financial-News-master/data/source_price.csv")
wsj_var=np.var(df.wsj_mean_compound)
cnbc_var=np.var(df.cnbc_mean_compound)
fortune_var=np.var(df.fortune_mean_compound)
reuters_var=np.var(df.reuters_mean_compound)
mu=0
noise=0.1
sigma_wsj=noise*wsj_var
sigma_cnbc=noise*cnbc_var
sigma_fortune=noise*fortune_var
sigma_reuters=noise*reuters_var
n=df.shape[0]
df_noise=pd.DataFrame()
df_noise['wsj_noise']=df['wsj_mean_compound']
df_noise['cnbc_noise']=df['cnbc_mean_compound']
df_noise['fortune_noise']=df['fortune_mean_compound']
df_noise['reuters_noise']=df['reuters_mean_compound']
for i in range(0,n):
df_noise['wsj_noise'][i]+=np.random.normal(mu,sigma_wsj)
df_noise['cnbc_noise'][i]+=np.random.normal(mu,sigma_cnbc)
df_noise['fortune_noise'][i]+=np.random.normal(mu,sigma_fortune)
df_noise['reuters_noise'][i]+=np.random.normal(mu,sigma_reuters)
df_noise.to_csv("/Users/william/Downloads/DP-LSTM-Differential-Privacy-inspired-LSTM-for-Stock-Prediction-Using-Financial-News-master/data/source_price_noise0.csv")
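# --- Illustrative sketch (not part of the original script) -----------------
# The element-wise loop above can equivalently be written in vectorized form,
# drawing one Gaussian sample per row and per news source; mu, n and the sigma
# values are assumed to be those defined earlier in the script.
def _add_noise_vectorized(df_noise, n, mu, sigma_wsj, sigma_cnbc, sigma_fortune, sigma_reuters):
    import numpy as np
    df = df_noise.copy()
    df['wsj_noise'] += np.random.normal(mu, sigma_wsj, size=n)
    df['cnbc_noise'] += np.random.normal(mu, sigma_cnbc, size=n)
    df['fortune_noise'] += np.random.normal(mu, sigma_fortune, size=n)
    df['reuters_noise'] += np.random.normal(mu, sigma_reuters, size=n)
    return df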
dfn= | pd.read_csv("/Users/william/Downloads/DP-LSTM-Differential-Privacy-inspired-LSTM-for-Stock-Prediction-Using-Financial-News-master/data/source_price_noise0.csv",index_col=0) | pandas.read_csv |
# importing libraries
import pandas as pd
import numpy as np
import cv2
from constants import *
from sklearn.model_selection import train_test_split
from keras.models import Sequential,load_model
from keras.layers import Convolution2D,MaxPooling2D,Flatten,Dense
from keras.callbacks import ModelCheckpoint
dataset = pd.read_csv("data/dataset.csv")
pixels = dataset['pixels'].tolist()
faces = []
for pixel_sequence in pixels:
face = [int(pixel) for pixel in pixel_sequence.split(' ')]
face = np.asarray(face).reshape(width, height)
face = face / 255.0
face = cv2.resize(face.astype('uint8'),
(width, height))
faces.append(face.astype('float32'))
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
emotions = | pd.get_dummies(dataset['emotion']) | pandas.get_dummies |
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2018, University of Technology Graz"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>, <NAME>"
import pandas as pd
def time_delta_table(date_time_index, timedelta=pd.Timedelta(minutes=1), monotonic=False):
"""
get the timedelta of data gaps in a datetime index
:type date_time_index: pd.DatetimeIndex
:param timedelta: time delta above which a gap is defined
:type timedelta: Timedelta
:param bool monotonic: whether to look for time gaps or time setbacks
:return: start-time [start], end-time [end], duration of the gap [delta]
:rtype: DataFrame[start, end, delta]
"""
if isinstance(date_time_index, pd.DatetimeIndex) and date_time_index.tzinfo is not None:
temp = pd.Series(data=date_time_index, index=date_time_index)
else:
temp = date_time_index.to_series()
if monotonic:
timedelta = | pd.Timedelta(minutes=0) | pandas.Timedelta |
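# --- Illustrative, standalone sketch (not the truncated body of
# time_delta_table) --- the gap-detection idea documented above can be
# expressed with diff(): consecutive timestamps whose spacing exceeds
# `timedelta` mark a gap starting at the earlier stamp and ending at the
# later one. Relies on the module-level `import pandas as pd`.
def _find_gaps_sketch(index, timedelta=pd.Timedelta(minutes=1)):
    s = index.to_series()
    delta = s.diff()
    gaps = delta[delta > timedelta]
    return pd.DataFrame({'start': s.shift(1)[gaps.index],
                         'end': gaps.index,
                         'delta': gaps})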
import pandas as pd
import numpy as np
import os
import cvxpy as cvx
from matplotlib import pyplot as plt
from datetime import datetime
from column_names import ColumnNames
ENCODING = 'iso-8859-8'
# FOLDER_PATH = r'C:\Users\Asus\Google Drive\Votes Migration 2020'
FOLDER_PATH = 'data files'
KNESSET_SIZE = 120
def read_votes_file(filename, how: str = 'settlement'):
if how == 'settlement':
return _read_votes_file_settlement(filename)
elif how == 'kalpi':
return _read_votes_file_kalpi(filename)
else:
raise ValueError
def _read_votes_file_kalpi(filename):
file_path = os.path.join(FOLDER_PATH, filename)
df = | pd.read_csv(file_path, encoding=ENCODING) | pandas.read_csv |
from itertools import chain
import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_number
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.tests.apply.common import (
frame_transform_kernels,
series_transform_kernels,
)
@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"])
@pytest.mark.parametrize(
"args,kwds",
[
pytest.param([], {}, id="no_args_or_kwds"),
pytest.param([1], {}, id="axis_from_args"),
pytest.param([], {"axis": 1}, id="axis_from_kwds"),
pytest.param([], {"numeric_only": True}, id="optional_kwds"),
pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how):
if len(args) > 1 and how == "agg":
request.node.add_marker(
pytest.mark.xfail(
raises=TypeError,
reason="agg/apply signature mismatch - agg passes 2nd "
"argument to func",
)
)
result = getattr(float_frame, how)(func, *args, **kwds)
expected = getattr(float_frame, func)(*args, **kwds)
tm.assert_series_equal(result, expected)
def test_with_string_args(datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
@pytest.mark.parametrize("op", ["mean", "median", "std", "var"])
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_np_reducer(float_frame, op, how):
# GH 39116
float_frame = DataFrame({"a": [1, 2], "b": [3, 4]})
result = getattr(float_frame, how)(op)
# pandas ddof defaults to 1, numpy to 0
kwargs = {"ddof": 1} if op in ("std", "var") else {}
expected = Series(
getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"]
)
@pytest.mark.parametrize("how", ["transform", "apply"])
def test_apply_np_transformer(float_frame, op, how):
# GH 39116
# float_frame will _usually_ have negative values, which will
# trigger the warning here, but let's put one in just to be sure
float_frame.iloc[0, 0] = -1.0
warn = None
if op in ["log", "sqrt"]:
warn = RuntimeWarning
with tm.assert_produces_warning(warn):
result = getattr(float_frame, how)(op)
expected = getattr(np, op)(float_frame)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", True),
("any", True),
],
),
),
)
def test_agg_cython_table_series(series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform_series(series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |