import re
import os
import xml.etree.ElementTree as ET
import pandas as pd
import boto3
import csv
from urllib.parse import unquote_plus
from xml_2_data import mnfp_2_data
from xml_2_data import mnfp1_2_data
from xml_2_data import mnfp2_2_data
from nmfp_rename_vars import nmfp_rename_vars
s3_client = boto3.client('s3')
s3 = boto3.resource('s3')
def lambda_handler(event, context):
# parse the S3 triggered event
debug = False
if debug:
bucket = "fundmapper"
key = "02-RawNMFPs/S000007665/2011-01-06-S000007665.txt"
else:
record = event['Records'][0]
bucket = record['s3']['bucket']['name']
key = unquote_plus(record['s3']['object']['key'])
prefix, series_id, filing = key.split("/")
print(bucket)
print(series_id)
print(filing)
print(key)
# store temporarily
print("download")
s3_client.download_file(bucket, key, f"/tmp/{series_id}_{filing}.txt")
print("downloaded")
# deletion of the source object is currently disabled
# (s3.Object(bucket, key)
# .delete())
print("delete skipped")
# read
with open("/tmp/" + series_id + "_" + filing + ".txt", 'r') as filing_file:
    filing = filing_file.read()
filing = filing.replace(":", "")
filing_type = re.search("<TYPE>(.*)\n", filing).group(1)
filing_date = int(re.sub("[^0-9]", "", re.search("CONFORMED PERIOD OF REPORT(.*)\n", filing).group(1))[0:6])
filing_year = int(re.sub("[^0-9]", "", re.search("CONFORMED PERIOD OF REPORT(.*)\n", filing).group(1))[0:4])
filing = (filing.replace("\n", "")
.replace(' xmlns="http//www.sec.gov/edgar/nmfpsecurities"', '')
.replace(' xmlns="http//www.sec.gov/edgar/nmfpfund"', ""))
print("convert")
if filing_type in ["N-MFP", "N-MFP/A"]:
series_df, class_df, holdings, all_collateral = mnfp_2_data(filing)
series_df, class_df, holdings, all_collateral = nmfp_rename_vars(filing_type, series_df, class_df, holdings,
all_collateral)
if filing_type in ["N-MFP1", "N-MFP1/A"]:
series_df, class_df, holdings, all_collateral = mnfp1_2_data(filing)
if filing_type in ["N-MFP2", "N-MFP2/A"]:
series_df, class_df, holdings, all_collateral = mnfp2_2_data(filing)
# drop commas from all fields; Glue doesn't seem to handle them
series_df.replace({",": " "}, regex=True, inplace=True)
class_df.replace({",": " "}, regex=True, inplace=True)
holdings.replace({",": " "}, regex=True, inplace=True)
all_collateral.replace({",": " "}, regex=True, inplace=True)
# add date
series_df['date'], class_df['date'], holdings['date'], all_collateral[
'date'] = filing_date, filing_date, filing_date, filing_date
# add filing type
series_df['filing_type'], class_df['filing_type'], holdings['filing_type'], all_collateral[
'filing_type'] = filing_type, filing_type, filing_type, filing_type
# add series id
series_df['series_id'], class_df['series_id'], holdings['series_id'], all_collateral[
'series_id'] = series_id, series_id, series_id, series_id
# holdings
holdings_str_columns = ['filing_type', 'repurchaseAgreement', 'securityDemandFeatureFlag',
'guarantorList', 'InvestmentIdentifier', 'NRSRO',
'isFundTreatingAsAcquisitionUnderlyingSecurities',
'finalLegalInvestmentMaturityDate', 'cik', 'weeklyLiquidAssetSecurityFlag', 'rating',
'investmentCategory', 'repurchaseAgreementList', 'dailyLiquidAssetSecurityFlag',
'securityCategorizedAtLevel3Flag', 'CUSIPMember', 'investmentMaturityDateWAM',
'ISINId', 'LEIID', 'titleOfIssuer', 'securityEnhancementsFlag', 'InvestmentTypeDomain',
'securityGuaranteeFlag', 'fundAcqstnUndrlyngSecurityFlag',
'securityEligibilityFlag', 'otherUniqueId', 'demandFeatureIssuerList', 'nameOfIssuer',
'illiquidSecurityFlag', 'series_id']
holdings_float_columns = ['yieldOfTheSecurityAsOfReportingDate', 'investmentMaturityDateWAL',
'AvailableForSaleSecuritiesAmortizedCost',
'includingValueOfAnySponsorSupport', 'excludingValueOfAnySponsorSupport',
'InvestmentOwnedBalancePrincipalAmount', 'percentageOfMoneyMarketFundNetAssets', ]
holdings_int_columns = ['date', 'issuer_number']
holdings_columns = holdings_str_columns + holdings_float_columns + holdings_int_columns
holdings_data = pd.DataFrame(columns=holdings_columns)
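# --- Hedged local-test sketch (not part of the original handler) ---
# A minimal S3 put-event stub using the same bucket/key as the debug branch
# above, showing the structure that lambda_handler expects from the S3 trigger.
if __name__ == "__main__":
    sample_event = {
        "Records": [{
            "s3": {
                "bucket": {"name": "fundmapper"},
                "object": {"key": "02-RawNMFPs/S000007665/2011-01-06-S000007665.txt"},
            }
        }]
    }
    # lambda_handler(sample_event, None)  # requires AWS credentials and the real object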
#!/usr/bin/python
# coding=utf-8
# Extract text keywords with the TF-IDF method
# http://scikit-learn.org/stable/modules/feature_extraction.html#tfidf-term-weighting
import sys,codecs
import pandas as pd
import numpy as np
import jieba.posseg
import jieba.analyse
# from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
"""
TF-IDF权重:
1、CountVectorizer 构建词频矩阵
2、TfidfTransformer 构建tfidf权值计算
3、文本的关键字
4、对应的tfidf矩阵
"""
# Data preprocessing: tokenization, stopword removal, part-of-speech filtering
def dataPrepos(text, stopkey, pos):
l = []
# pos = ['n','v','vn']
#'nz', # noun
#'v',
# 'vd',
#'vn', # verb
#'l',
#'a', # adjective
# 'd' # adverb
#] # define the POS tags to keep
seg = jieba.posseg.cut(text) # tokenize with POS tags
for i in seg:
if i.word not in stopkey and i.flag in pos: # drop stopwords + filter by POS
# if i.word not in stopkey: # stopword removal only
l.append(i.word)
return l
def preprocess_for_corpus(text, stopkey,pos):
l = []
seg = jieba.posseg.cut(text)
for i in seg:
if i.word not in stopkey and i.flag in pos: # drop stopwords + filter by POS
l.append(i.word)
return ' '.join(l)
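# Hedged example of the POS-filtered segmentation performed by dataPrepos /
# preprocess_for_corpus; the sentence and POS whitelist are illustrative.
def _segmentation_demo():
    import jieba.posseg
    text = "自然语言处理是人工智能的一个重要方向"  # "NLP is an important branch of AI"
    keep_pos = ['n', 'v', 'vn']
    stopwords = set()
    return [w.word for w in jieba.posseg.cut(text) if w.flag in keep_pos and w.word not in stopwords]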
# tf-idf: extract the top-K keywords of each text
def getKeywords_tfidf(data,stopkey,topK,pos):
idList, titleList, abstractList = data['id'], data['title'], data['abstract']
corpus = [] # collect all documents in one list, one entry per document
for index in range(len(idList)):
text = '%s。%s' % (titleList[index], abstractList[index]) # concatenate title and abstract
text = dataPrepos(text, stopkey, pos) # text preprocessing
text = " ".join(text) # join into a space-separated string
corpus.append(text)
# 1. Build the term-frequency matrix from the corpus
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus) # term-frequency matrix; a[i][j] is the frequency of word j in document i
# 2. Compute the tf-idf weight of each word
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(X)
# 3. Get the vocabulary of the bag-of-words model
word = vectorizer.get_feature_names()
# 4. Get the tf-idf matrix; a[i][j] is the tf-idf weight of word j in document i
weight = tfidf.toarray()
# 5. Print the word weights
ids, titles, keys = [], [], []
for i in range(len(weight)):
print(u"-------这里输出第", i+1 , u"篇文本的词语tf-idf------")
ids.append(idList[i])
titles.append(titleList[i])
df_word, df_weight = [], [] # word list and matching weight list for the current document
for j in range(len(word)):
print( word[j], weight[i][j])
df_word.append(word[j])
df_weight.append(weight[i][j])
df_word = pd.DataFrame(df_word, columns=['word'])
df_weight = pd.DataFrame(df_weight, columns=['weight'])
word_weight = pd.concat([df_word, df_weight], axis=1) # combine the word and weight columns
word_weight = word_weight.sort_values(by="weight", ascending = False) # sort by weight in descending order
keyword = np.array(word_weight['word']) # take the word column as an array
word_split = [keyword[x] for x in range(0,topK)] # take the top-K words as keywords
word_split = " ".join(word_split)
# keys.append(word_split.encode("utf-8"))
keys.append(word_split)
result = pd.DataFrame({"id": ids, "title": titles, "key": keys},columns=['id','title','key'])
return result
def sort_coo(coo_matrix):
tuples = zip(coo_matrix.col, coo_matrix.data)
return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
# use only topn items from vector
sorted_items = sorted_items[:topn]
score_vals = []
feature_vals = []
# word index and corresponding tf-idf score
for idx, score in sorted_items:
# keep track of feature name and its corresponding score
score_vals.append(round(score, 3))
feature_vals.append(feature_names[idx])
# create a tuples of feature,score
# results = zip(feature_vals,score_vals)
results = {}
for idx in range(len(feature_vals)):
results[feature_vals[idx]] = score_vals[idx]
return results
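# Hedged end-to-end sketch of sort_coo + extract_topn_from_vector on a tiny
# corpus; only the helpers defined above and standard scikit-learn calls are used.
def _topn_demo():
    from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
    docs = ["apple banana apple", "banana cherry", "apple cherry cherry"]
    cv = CountVectorizer()
    tfidf = TfidfTransformer().fit_transform(cv.fit_transform(docs))
    sorted_items = sort_coo(tfidf[0].tocoo())                 # score the first document only
    return extract_topn_from_vector(cv.get_feature_names(), sorted_items, topn=2)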
def getKeywords(filename, data, stopkey, pos):
labels, abstractList = data['label'], data['content']
data['ci'] = data.apply(lambda row: preprocess_for_corpus(row['content'], stopkey, pos), axis=1)
corpus = data['ci'].tolist()
# 1. Build the term-frequency matrix from the corpus
# important: CountVectorizer's default token_pattern drops single-character tokens, so override it
vectorizer = CountVectorizer(min_df=1, token_pattern='(?u)\\b\\w+\\b')
print('getKeywords 2')
X = vectorizer.fit_transform(corpus) # term-frequency matrix; a[i][j] is the frequency of word j in document i
print('getKeywords 3')
# 2. Compute the tf-idf weight of each word
transformer = TfidfTransformer()
print('getKeywords 4')
tfidf = transformer.fit_transform(X)
word = vectorizer.get_feature_names()
# 3. Get the vocabulary of the bag-of-words model
# sort the tf-idf vectors by descending order of scores
sorted_items = sort_coo(tfidf.tocoo())
# extract only the top n; n here is 10
keywords = extract_topn_from_vector(word, sorted_items, 1000)
return keywords
def tfidf_getKeywords(path_in, path_out, path_stop, topk, pos = ['n','v','vn','a','vd']):
df = pd.read_csv(path_in)
# put the columns two at a time in a dataframe
# dataframe and visualization tools
import pandas as pd
import numpy as np
import matplotlib as mlp
import time
from matplotlib import pyplot as plt
import wx
import os
import numpy.polynomial.polynomial as poly
import statistics as stats
from statistics import mode
from scipy.fft import *
import warnings
warnings.filterwarnings("ignore")
# style and formatting
pd.options.display.float_format = '{:.15f}'.format
mlp.style.use('tableau-colorblind10')
mlp.rcParams['figure.dpi']= 300
mlp.rcParams['font.family'] = 'Arial'
mlp.rcParams['figure.figsize'] = [14, 10]
mlp.rcParams['figure.facecolor'] = 'white'
mlp.rcParams['axes.edgecolor'] = 'grey'
mlp.rcParams['axes.spines.top'] = False
mlp.rcParams['axes.spines.right'] = False
mlp.rcParams['axes.xmargin'] = 0.15
mlp.rcParams['axes.ymargin'] = 0.15
class NoiseAnalysis():
def __init__(self):
self.samples_list=[]
self.noise_list=[]
self.LoD_list=[]
self.LoQ_list=[]
self.true = ['1', 't', 'tr', 'tru', 'true', 'truee', 'y', 'ye', 'yes', 'yess', 'yeah', 'yu', 'yup', 'yupp', 'sure', 'certainly', 'yay']
self.ChangeDefaults = 'False'
self.SetSampleSize = 'False'
self.SampleSize = 20000
self.SelectRange = 'False'
self.Start = -100000
self.End = 100000
self.DetectSignal ='True'
self.Threshold = 1.0
self.PolyFit = 'True'
self.Degree = 4
self.RemoveOutliers = 'False'
self.nStandardDeviations = 0.0
self.FourierApproximation = 'True'
self.nHarmonics = 10
self.RMS_noise_summary = pd.DataFrame()
# open a Windows file dialog to select file(s); return the selected path(s)
def get_paths(self):
app = wx.App(None)
style = wx.FD_MULTIPLE
dialog = wx.FileDialog(None, 'Select File', wildcard='*.csv;*.arw', style=style)
if dialog.ShowModal() == wx.ID_OK:
paths = dialog.GetPaths()
else:
paths = None
dialog.Destroy()
return paths
# read file(s) and load the data into a DataFrame
def read_files(self, paths):
df = pd.DataFrame()
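# Hedged, generic sketch (not necessarily the original read_files logic): read
# each selected path and collect the columns side by side in one DataFrame.
def _read_csv_paths(paths):
    import pandas as pd
    frames = [pd.read_csv(p) for p in paths]
    return pd.concat(frames, axis=1)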
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import requests
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
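# Small illustration of Convert, which pairs consecutive list items into a dict;
# the parameter names below are illustrative.
_convert_example = Convert(['C', 1.0, 'kernel', 'rbf'])  # -> {'C': 1.0, 'kernel': 'rbf'}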
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
return 'Processed uploaded data set'
# Sent data to client
@app.route('/data/ClientRequest', methods=["GET", "POST"])
def CollectionData():
json.dumps(DataResultsRaw)
response = {
'Collection': DataResultsRaw
}
return jsonify(response)
def DataSetSelection():
global XDataTest, yDataTest
XDataTest = pd.DataFrame()
global StanceTest
global AllTargets
global target_names
target_namesLoc = []
if (StanceTest):
DataResultsTest = copy.deepcopy(DataResultsRawTest)
for dictionary in DataResultsRawTest:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRawTest.sort(key=lambda x: x[target], reverse=True)
DataResultsTest.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResultsTest:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargetsTest = [o[target] for o in DataResultsRawTest]
AllTargetsFloatValuesTest = []
previous = None
Class = 0
for i, value in enumerate(AllTargetsTest):
if (i == 0):
previous = value
target_namesLoc.append(value)
if (value == previous):
AllTargetsFloatValuesTest.append(Class)
else:
Class = Class + 1
target_namesLoc.append(value)
AllTargetsFloatValuesTest.append(Class)
previous = value
ArrayDataResultsTest = pd.DataFrame.from_dict(DataResultsTest)
XDataTest, yDataTest = ArrayDataResultsTest, AllTargetsFloatValuesTest
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary['_id']
del dictionary['InstanceID']
del dictionary[target]
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = pd.DataFrame.from_dict(DataResults)
global XData, yData, RANDOM_SEED
XData, yData = ArrayDataResults, AllTargetsFloatValues
global XDataStored, yDataStored
XDataStored = XData.copy()
yDataStored = yData.copy()
warnings.simplefilter('ignore')
return 'Everything is okay'
def callPreResults():
global XData
global yData
global target_names
global impDataInst
DataSpaceResMDS = FunMDS(XData)
DataSpaceResTSNE = FunTsne(XData)
DataSpaceResTSNE = DataSpaceResTSNE.tolist()
DataSpaceUMAP = FunUMAP(XData)
XDataJSONEntireSetRes = XData.to_json(orient='records')
global preResults
preResults = []
preResults.append(json.dumps(target_names)) # Position: 0
preResults.append(json.dumps(DataSpaceResMDS)) # Position: 1
preResults.append(json.dumps(XDataJSONEntireSetRes)) # Position: 2
preResults.append(json.dumps(yData)) # Position: 3
preResults.append(json.dumps(AllTargets)) # Position: 4
preResults.append(json.dumps(DataSpaceResTSNE)) # Position: 5
preResults.append(json.dumps(DataSpaceUMAP)) # Position: 6
preResults.append(json.dumps(impDataInst)) # Position: 7
# Sending each model's results to frontend
@app.route('/data/requestDataSpaceResults', methods=["GET", "POST"])
def SendDataSpaceResults():
global preResults
callPreResults()
response = {
'preDataResults': preResults,
}
return jsonify(response)
# Main function
if __name__ == '__main__':
app.run()
# Debugging and mirroring client
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
if app.debug:
return requests.get('http://localhost:8080/{}'.format(path)).text
return render_template("index.html")
# This block of code is for server computations
def column_index(df, query_cols):
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols,query_cols,sorter=sidx)].tolist()
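# Small illustration of column_index: it returns the positional indices of the
# requested columns; the column names below are illustrative.
def _column_index_demo():
    df_demo = pd.DataFrame(columns=['age', 'chol', 'thal'])
    return column_index(df_demo, ['thal', 'age'])  # -> [2, 0]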
def class_feature_importance(X, Y, feature_importances):
N, M = X.shape
X = scale(X)
out = {}
for c in set(Y):
out[c] = dict(
zip(range(N), np.mean(X[Y==c, :], axis=0)*feature_importances)
)
return out
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/EnsembleMode', methods=["GET", "POST"])
def EnsembleMethod():
global crossValidation
global RANDOM_SEED
global XData
RANDOM_SEED = 42
RetrievedStatus = request.get_data().decode('utf8').replace("'", '"')
RetrievedStatus = json.loads(RetrievedStatus)
modeMethod = RetrievedStatus['defaultModeMain']
if (modeMethod == 'blend'):
crossValidation = ShuffleSplit(n_splits=1, test_size=.20, random_state=RANDOM_SEED)
else:
crossValidation = 5
return 'Okay'
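# Hedged sketch of the two crossValidation modes set above: an integer means
# k-fold cross-validation inside GridSearchCV, while a one-split ShuffleSplit
# behaves like a single train/validation holdout ("blend" mode). Toy data only.
def _cv_mode_demo():
    from sklearn.datasets import load_iris
    from sklearn.model_selection import cross_val_score, ShuffleSplit
    from sklearn.neighbors import KNeighborsClassifier
    X, y = load_iris(return_X_y=True)
    clf = KNeighborsClassifier()
    kfold_scores = cross_val_score(clf, X, y, cv=5)  # five validation scores
    holdout_score = cross_val_score(
        clf, X, y, cv=ShuffleSplit(n_splits=1, test_size=.20, random_state=42))  # one score
    return kfold_scores, holdout_score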
# Initialize every model for each algorithm
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequestSelParameters', methods=["GET", "POST"])
def RetrieveModel():
# get the models from the frontend
RetrievedModel = request.get_data().decode('utf8').replace("'", '"')
RetrievedModel = json.loads(RetrievedModel)
global algorithms
algorithms = RetrievedModel['Algorithms']
toggle = RetrievedModel['Toggle']
global crossValidation
global XData
global yData
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
# loop through the algorithms
global allParametersPerformancePerModel
start = timeit.default_timer()
print('CVorTT', crossValidation)
for eachAlgor in algorithms:
if (eachAlgor) == 'KNN':
clf = KNeighborsClassifier()
params = {'n_neighbors': list(range(1, 25)), 'metric': ['chebyshev', 'manhattan', 'euclidean', 'minkowski'], 'algorithm': ['brute', 'kd_tree', 'ball_tree'], 'weights': ['uniform', 'distance']}
AlgorithmsIDsEnd = 0
elif (eachAlgor) == 'SVC':
clf = SVC(probability=True,random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.1,4.43,0.11)), 'kernel': ['rbf','linear', 'poly', 'sigmoid']}
AlgorithmsIDsEnd = SVCModelsCount
elif (eachAlgor) == 'GauNB':
clf = GaussianNB()
params = {'var_smoothing': list(np.arange(0.00000000001,0.0000001,0.0000000002))}
AlgorithmsIDsEnd = GausNBModelsCount
elif (eachAlgor) == 'MLP':
clf = MLPClassifier(random_state=RANDOM_SEED)
params = {'alpha': list(np.arange(0.00001,0.001,0.0002)), 'tol': list(np.arange(0.00001,0.001,0.0004)), 'max_iter': list(np.arange(100,200,100)), 'activation': ['relu', 'identity', 'logistic', 'tanh'], 'solver' : ['adam', 'sgd']}
AlgorithmsIDsEnd = MLPModelsCount
elif (eachAlgor) == 'LR':
clf = LogisticRegression(random_state=RANDOM_SEED)
params = {'C': list(np.arange(0.5,2,0.075)), 'max_iter': list(np.arange(50,250,50)), 'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'], 'penalty': ['l2', 'none']}
AlgorithmsIDsEnd = LRModelsCount
elif (eachAlgor) == 'LDA':
clf = LinearDiscriminantAnalysis()
params = {'shrinkage': list(np.arange(0,1,0.01)), 'solver': ['lsqr', 'eigen']}
AlgorithmsIDsEnd = LDAModelsCount
elif (eachAlgor) == 'QDA':
clf = QuadraticDiscriminantAnalysis()
params = {'reg_param': list(np.arange(0,1,0.02)), 'tol': list(np.arange(0.00001,0.001,0.0002))}
AlgorithmsIDsEnd = QDAModelsCount
elif (eachAlgor) == 'RF':
clf = RandomForestClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = RFModelsCount
elif (eachAlgor) == 'ExtraT':
clf = ExtraTreesClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(60, 140)), 'criterion': ['gini', 'entropy']}
AlgorithmsIDsEnd = ExtraTModelsCount
elif (eachAlgor) == 'AdaB':
clf = AdaBoostClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(40, 80)), 'learning_rate': list(np.arange(0.1,2.3,1.1)), 'algorithm': ['SAMME.R', 'SAMME']}
AlgorithmsIDsEnd = AdaBModelsCount
else:
clf = GradientBoostingClassifier(random_state=RANDOM_SEED)
params = {'n_estimators': list(range(85, 115)), 'learning_rate': list(np.arange(0.01,0.23,0.11)), 'criterion': ['friedman_mse', 'mse', 'mae']}
AlgorithmsIDsEnd = GradBModelsCount
allParametersPerformancePerModel = GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossValidation)
# New visualization - model space
# header = "model_id,algorithm_id,mean_test_accuracy,mean_test_precision_micro,mean_test_precision_macro,mean_test_precision_weighted,mean_test_recall_micro,mean_test_recall_macro,mean_test_recall_weighted,mean_test_roc_auc_ovo_weighted,geometric_mean_score_micro,geometric_mean_score_macro,geometric_mean_score_weighted,matthews_corrcoef,f5_micro,f5_macro,f5_weighted,f1_micro,f1_macro,f1_weighted,f2_micro,f2_macro,f2_weighted,log_loss\n"
# dataReceived = []
# counter = 0
# for indx, el in enumerate(allParametersPerformancePerModel):
# dictFR = json.loads(el)
# frame = pd.DataFrame.from_dict(dictFR)
# for ind, elInside in frame.iterrows():
# counter = counter + 1
# dataReceived.append(str(counter))
# dataReceived.append(',')
# dataReceived.append(str(indx+1))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_accuracy']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_precision_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_recall_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['mean_test_roc_auc_ovo_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['geometric_mean_score_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['matthews_corrcoef']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f5_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f1_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_micro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_macro']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['f2_weighted']))
# dataReceived.append(',')
# dataReceived.append(str(elInside['log_loss']))
# dataReceived.append("\n")
# dataReceivedItems = ''.join(dataReceived)
# csvString = header + dataReceivedItems
# fw = open ("modelSpace.csv","w+",encoding="utf-8")
# fw.write(csvString)
# fw.close()
# call the function that sends the results to the frontend
stop = timeit.default_timer()
print('Time GridSearch: ', stop - start)
SendEachClassifiersPerformanceToVisualize()
return 'Everything Okay'
location = './cachedir'
memory = Memory(location, verbose=0)
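# Hedged illustration of the joblib Memory cache configured above: the first
# call to a cached function computes and stores the result under ./cachedir;
# repeated calls with identical arguments are read back from disk.
@memory.cache
def _cached_square(x):
    return x * x
# _cached_square(4)  # -> 16, computed once and then served from the cache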
# calculate the performance and other results for all algorithms and models
@memory.cache
def GridSearchForModels(XData, yData, clf, params, eachAlgor, AlgorithmsIDsEnd, toggle, crossVal):
print('loop')
# this is the grid we use to train the models
grid = GridSearchCV(
estimator=clf, param_grid=params,
cv=crossVal, refit='accuracy', scoring=scoring,
verbose=0, n_jobs=-1)
# fit and extract the probabilities
grid.fit(XData, yData)
# process the results
cv_results = []
cv_results.append(grid.cv_results_)
df_cv_results = pd.DataFrame.from_dict(cv_results)
# number of models stored
number_of_models = len(df_cv_results.iloc[0][0])
# initialize results per row
df_cv_results_per_row = []
# loop through number of models
modelsIDs = []
for i in range(number_of_models):
modelsIDs.append(AlgorithmsIDsEnd+i)
# initialize results per item
df_cv_results_per_item = []
for column in df_cv_results.iloc[0]:
df_cv_results_per_item.append(column[i])
df_cv_results_per_row.append(df_cv_results_per_item)
# store the results into a pandas dataframe
df_cv_results_classifiers = pd.DataFrame(data = df_cv_results_per_row, columns= df_cv_results.columns)
# copy and filter in order to get only the metrics
metrics = df_cv_results_classifiers.copy()
metrics = metrics.filter(['mean_test_accuracy','mean_test_precision_micro','mean_test_precision_macro','mean_test_precision_weighted','mean_test_recall_micro','mean_test_recall_macro','mean_test_recall_weighted','mean_test_roc_auc_ovo_weighted'])
# concat parameters and performance
parametersPerformancePerModel = pd.DataFrame(df_cv_results_classifiers['params'])
parametersPerformancePerModel = parametersPerformancePerModel.to_json()
parametersLocal = json.loads(parametersPerformancePerModel)['params'].copy()
Models = []
for index, items in enumerate(parametersLocal):
Models.append(str(index))
parametersLocalNew = [ parametersLocal[your_key] for your_key in Models ]
permList = []
PerFeatureAccuracy = []
PerFeatureAccuracyAll = []
PerClassMetric = []
perModelProb = []
perModelPrediction = []
resultsMicro = []
resultsMacro = []
resultsWeighted = []
resultsCorrCoef = []
resultsMicroBeta5 = []
resultsMacroBeta5 = []
resultsWeightedBeta5 = []
resultsMicroBeta1 = []
resultsMacroBeta1 = []
resultsWeightedBeta1 = []
resultsMicroBeta2 = []
resultsMacroBeta2 = []
resultsWeightedBeta2 = []
resultsLogLoss = []
resultsLogLossFinal = []
loop = 8
# influence calculation for all the instances
inputs = range(len(XData))
num_cores = multiprocessing.cpu_count()
#impDataInst = Parallel(n_jobs=num_cores)(delayed(processInput)(i,XData,yData,crossValidation,clf) for i in inputs)
for eachModelParameters in parametersLocalNew:
clf.set_params(**eachModelParameters)
if (toggle == 1):
perm = PermutationImportance(clf, cv = None, refit = True, n_iter = 25).fit(XData, yData)
permList.append(perm.feature_importances_)
n_feats = XData.shape[1]
PerFeatureAccuracy = []
for i in range(n_feats):
scores = model_selection.cross_val_score(clf, XData.values[:, i].reshape(-1, 1), yData, cv=5)
PerFeatureAccuracy.append(scores.mean())
PerFeatureAccuracyAll.append(PerFeatureAccuracy)
else:
permList.append(0)
PerFeatureAccuracyAll.append(0)
clf.fit(XData, yData)
yPredict = clf.predict(XData)
yPredict = np.nan_to_num(yPredict)
perModelPrediction.append(yPredict)
# retrieve target names (class names)
PerClassMetric.append(classification_report(yData, yPredict, target_names=target_names, digits=2, output_dict=True))
yPredictProb = clf.predict_proba(XData)
yPredictProb = np.nan_to_num(yPredictProb)
perModelProb.append(yPredictProb.tolist())
resultsMicro.append(geometric_mean_score(yData, yPredict, average='micro'))
resultsMacro.append(geometric_mean_score(yData, yPredict, average='macro'))
resultsWeighted.append(geometric_mean_score(yData, yPredict, average='weighted'))
resultsCorrCoef.append(matthews_corrcoef(yData, yPredict))
resultsMicroBeta5.append(fbeta_score(yData, yPredict, average='micro', beta=0.5))
resultsMacroBeta5.append(fbeta_score(yData, yPredict, average='macro', beta=0.5))
resultsWeightedBeta5.append(fbeta_score(yData, yPredict, average='weighted', beta=0.5))
resultsMicroBeta1.append(fbeta_score(yData, yPredict, average='micro', beta=1))
resultsMacroBeta1.append(fbeta_score(yData, yPredict, average='macro', beta=1))
resultsWeightedBeta1.append(fbeta_score(yData, yPredict, average='weighted', beta=1))
resultsMicroBeta2.append(fbeta_score(yData, yPredict, average='micro', beta=2))
resultsMacroBeta2.append(fbeta_score(yData, yPredict, average='macro', beta=2))
resultsWeightedBeta2.append(fbeta_score(yData, yPredict, average='weighted', beta=2))
resultsLogLoss.append(log_loss(yData, yPredictProb, normalize=True))
maxLog = max(resultsLogLoss)
minLog = min(resultsLogLoss)
for each in resultsLogLoss:
resultsLogLossFinal.append((each-minLog)/(maxLog-minLog))
metrics.insert(loop,'geometric_mean_score_micro',resultsMicro)
metrics.insert(loop+1,'geometric_mean_score_macro',resultsMacro)
metrics.insert(loop+2,'geometric_mean_score_weighted',resultsWeighted)
metrics.insert(loop+3,'matthews_corrcoef',resultsCorrCoef)
metrics.insert(loop+4,'f5_micro',resultsMicroBeta5)
metrics.insert(loop+5,'f5_macro',resultsMacroBeta5)
metrics.insert(loop+6,'f5_weighted',resultsWeightedBeta5)
metrics.insert(loop+7,'f1_micro',resultsMicroBeta1)
metrics.insert(loop+8,'f1_macro',resultsMacroBeta1)
metrics.insert(loop+9,'f1_weighted',resultsWeightedBeta1)
metrics.insert(loop+10,'f2_micro',resultsMicroBeta2)
metrics.insert(loop+11,'f2_macro',resultsMacroBeta2)
metrics.insert(loop+12,'f2_weighted',resultsWeightedBeta2)
metrics.insert(loop+13,'log_loss',resultsLogLossFinal)
perModelPredPandas = pd.DataFrame(perModelPrediction)
perModelPredPandas = perModelPredPandas.to_json()
perModelProbPandas = pd.DataFrame(perModelProb)
perModelProbPandas = perModelProbPandas.to_json()
PerClassMetricPandas = pd.DataFrame(PerClassMetric)
del PerClassMetricPandas['accuracy']
del PerClassMetricPandas['macro avg']
del PerClassMetricPandas['weighted avg']
PerClassMetricPandas = PerClassMetricPandas.to_json()
perm_imp_eli5PD = pd.DataFrame(permList)
perm_imp_eli5PD = perm_imp_eli5PD.to_json()
PerFeatureAccuracyPandas = pd.DataFrame(PerFeatureAccuracyAll)
PerFeatureAccuracyPandas = PerFeatureAccuracyPandas.to_json()
bestfeatures = SelectKBest(score_func=chi2, k='all')
fit = bestfeatures.fit(XData,yData)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(XData.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score'] #naming the dataframe columns
featureScores = featureScores.to_json()
# gather the results and send them back
results.append(modelsIDs) # Position: 0 and so on
results.append(parametersPerformancePerModel) # Position: 1 and so on
results.append(PerClassMetricPandas) # Position: 2 and so on
results.append(PerFeatureAccuracyPandas) # Position: 3 and so on
results.append(perm_imp_eli5PD) # Position: 4 and so on
results.append(featureScores) # Position: 5 and so on
metrics = metrics.to_json()
results.append(metrics) # Position: 6 and so on
results.append(perModelProbPandas) # Position: 7 and so on
results.append(json.dumps(perModelPredPandas)) # Position: 8 and so on
return results
# Sending each model's results to frontend
@app.route('/data/PerformanceForEachModel', methods=["GET", "POST"])
def SendEachClassifiersPerformanceToVisualize():
response = {
'PerformancePerModel': allParametersPerformancePerModel,
}
return jsonify(response)
def Remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
if (isinstance(num, float)):
if np.isnan(num):
pass
else:
final_list.append(float(num))
else:
final_list.append(num)
return final_list
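# Small illustration of Remove: it drops duplicates and NaN entries while
# keeping first-occurrence order; the values below are illustrative.
_remove_example = Remove([1.0, float('nan'), 1.0, 'acc', 'acc'])  # -> [1.0, 'acc']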
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendBrushedParam', methods=["GET", "POST"])
def RetrieveModelsParam():
RetrieveModelsPar = request.get_data().decode('utf8').replace("'", '"')
RetrieveModelsPar = json.loads(RetrieveModelsPar)
counterKNN = 0
counterSVC = 0
counterGausNB = 0
counterMLP = 0
counterLR = 0
counterLDA = 0
counterQDA = 0
counterRF = 0
counterExtraT = 0
counterAdaB = 0
counterGradB = 0
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
global algorithmsList
algorithmsList = RetrieveModelsPar['algorithms']
for index, items in enumerate(algorithmsList):
if (items == 'KNN'):
counterKNN += 1
KNNModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'SVC'):
counterSVC += 1
SVCModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'GauNB'):
counterGausNB += 1
GausNBModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'MLP'):
counterMLP += 1
MLPModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LR'):
counterLR += 1
LRModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'LDA'):
counterLDA += 1
LDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'QDA'):
counterQDA += 1
QDAModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'RF'):
counterRF += 1
RFModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'ExtraT'):
counterExtraT += 1
ExtraTModels.append(int(RetrieveModelsPar['models'][index]))
elif (items == 'AdaB'):
counterAdaB += 1
AdaBModels.append(int(RetrieveModelsPar['models'][index]))
else:
counterGradB += 1
GradBModels.append(int(RetrieveModelsPar['models'][index]))
return 'Everything Okay'
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/factors', methods=["GET", "POST"])
def RetrieveFactors():
global factors
global allParametersPerformancePerModel
Factors = request.get_data().decode('utf8').replace("'", '"')
FactorsInt = json.loads(Factors)
factors = FactorsInt['Factors']
# this is if we want to change the factors before running the search
#if (len(allParametersPerformancePerModel) == 0):
# pass
#else:
global sumPerClassifierSel
global ModelSpaceMDSNew
global ModelSpaceTSNENew
global metricsPerModel
sumPerClassifierSel = []
sumPerClassifierSel = preProcsumPerMetric(factors)
ModelSpaceMDSNew = []
ModelSpaceTSNENew = []
loopThroughMetrics = PreprocessingMetrics()
loopThroughMetrics = loopThroughMetrics.fillna(0)
metricsPerModel = preProcMetricsAllAndSel()
flagLocal = 0
countRemovals = 0
for l,el in enumerate(factors):
if el == 0:
loopThroughMetrics.drop(loopThroughMetrics.columns[[l-countRemovals]], axis=1, inplace=True)
countRemovals = countRemovals + 1
flagLocal = 1
if flagLocal == 1:
ModelSpaceMDSNew = FunMDS(loopThroughMetrics)
ModelSpaceTSNENew = FunTsne(loopThroughMetrics)
ModelSpaceTSNENew = ModelSpaceTSNENew.tolist()
return 'Everything Okay'
@app.route('/data/UpdateOverv', methods=["GET", "POST"])
def UpdateOverview():
ResultsUpdateOverview = []
ResultsUpdateOverview.append(sumPerClassifierSel)
ResultsUpdateOverview.append(ModelSpaceMDSNew)
ResultsUpdateOverview.append(ModelSpaceTSNENew)
ResultsUpdateOverview.append(metricsPerModel)
response = {
'Results': ResultsUpdateOverview
}
return jsonify(response)
def PreprocessingMetrics():
dicKNN = json.loads(allParametersPerformancePerModel[6])
dicSVC = json.loads(allParametersPerformancePerModel[15])
dicGausNB = json.loads(allParametersPerformancePerModel[24])
dicMLP = json.loads(allParametersPerformancePerModel[33])
dicLR = json.loads(allParametersPerformancePerModel[42])
dicLDA = json.loads(allParametersPerformancePerModel[51])
dicQDA = json.loads(allParametersPerformancePerModel[60])
dicRF = json.loads(allParametersPerformancePerModel[69])
dicExtraT = json.loads(allParametersPerformancePerModel[78])
dicAdaB = json.loads(allParametersPerformancePerModel[87])
dicGradB = json.loads(allParametersPerformancePerModel[96])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatMetrics = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
return df_concatMetrics
def PreprocessingPred():
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
dfLDA = pd.DataFrame.from_dict(dicLDA)
dfQDA = pd.DataFrame.from_dict(dicQDA)
dfRF = pd.DataFrame.from_dict(dicRF)
dfExtraT = pd.DataFrame.from_dict(dicExtraT)
dfAdaB = pd.DataFrame.from_dict(dicAdaB)
dfGradB = pd.DataFrame.from_dict(dicGradB)
dfKNN.index = dfKNN.index.astype(int)
dfSVC.index = dfSVC.index.astype(int) + SVCModelsCount
dfGausNB.index = dfGausNB.index.astype(int) + GausNBModelsCount
dfMLP.index = dfMLP.index.astype(int) + MLPModelsCount
dfLR.index = dfLR.index.astype(int) + LRModelsCount
dfLDA.index = dfLDA.index.astype(int) + LDAModelsCount
dfQDA.index = dfQDA.index.astype(int) + QDAModelsCount
dfRF.index = dfRF.index.astype(int) + RFModelsCount
dfExtraT.index = dfExtraT.index.astype(int) + ExtraTModelsCount
dfAdaB.index = dfAdaB.index.astype(int) + AdaBModelsCount
dfGradB.index = dfGradB.index.astype(int) + GradBModelsCount
dfKNNFiltered = dfKNN.loc[KNNModels, :]
dfSVCFiltered = dfSVC.loc[SVCModels, :]
dfGausNBFiltered = dfGausNB.loc[GausNBModels, :]
dfMLPFiltered = dfMLP.loc[MLPModels, :]
dfLRFiltered = dfLR.loc[LRModels, :]
dfLDAFiltered = dfLDA.loc[LDAModels, :]
dfQDAFiltered = dfQDA.loc[QDAModels, :]
dfRFFiltered = dfRF.loc[RFModels, :]
dfExtraTFiltered = dfExtraT.loc[ExtraTModels, :]
dfAdaBFiltered = dfAdaB.loc[AdaBModels, :]
dfGradBFiltered = dfGradB.loc[GradBModels, :]
df_concatProbs = pd.concat([dfKNNFiltered, dfSVCFiltered, dfGausNBFiltered, dfMLPFiltered, dfLRFiltered, dfLDAFiltered, dfQDAFiltered, dfRFFiltered, dfExtraTFiltered, dfAdaBFiltered, dfGradBFiltered])
predictions = []
for column, content in df_concatProbs.items():
el = [sum(x)/len(x) for x in zip(*content)]
predictions.append(el)
return predictions
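# Hedged illustration of the soft-voting step above: per-class probabilities
# from several models are averaged column-wise with zip; the numbers are toy values.
def _average_probs_demo():
    model_probs = [[0.8, 0.2], [0.6, 0.4], [0.7, 0.3]]   # three models, one instance
    return [sum(x) / len(x) for x in zip(*model_probs)]  # -> approx. [0.7, 0.3]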
def PreprocessingPredUpdate(Models):
Models = json.loads(Models)
ModelsList= []
for loop in Models['ClassifiersList']:
ModelsList.append(loop)
dicKNN = json.loads(allParametersPerformancePerModel[7])
dicSVC = json.loads(allParametersPerformancePerModel[16])
dicGausNB = json.loads(allParametersPerformancePerModel[25])
dicMLP = json.loads(allParametersPerformancePerModel[34])
dicLR = json.loads(allParametersPerformancePerModel[43])
dicLDA = json.loads(allParametersPerformancePerModel[52])
dicQDA = json.loads(allParametersPerformancePerModel[61])
dicRF = json.loads(allParametersPerformancePerModel[70])
dicExtraT = json.loads(allParametersPerformancePerModel[79])
dicAdaB = json.loads(allParametersPerformancePerModel[88])
dicGradB = json.loads(allParametersPerformancePerModel[97])
dfKNN = pd.DataFrame.from_dict(dicKNN)
dfSVC = pd.DataFrame.from_dict(dicSVC)
dfGausNB = pd.DataFrame.from_dict(dicGausNB)
dfMLP = pd.DataFrame.from_dict(dicMLP)
dfLR = pd.DataFrame.from_dict(dicLR)
# Ab initio Elasticity and Thermodynamics of Minerals
#
# Version 2.5.0 27/10/2021
#
# Comment the following three lines to produce the documentation
# with readthedocs
# from IPython import get_ipython
# get_ipython().magic('cls')
# get_ipython().magic('reset -sf')
import datetime
import os
import sys
import scipy
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
# from matplotlib import rc
import pandas as pd
import sympy as sym
import parame as pr
from scipy.optimize import curve_fit, fmin, minimize_scalar, minimize
from scipy.interpolate import UnivariateSpline, Rbf
from scipy import integrate
from plot import plot_class
from mineral_data import mineral, load_database, equilib, reaction,\
pressure_react, export, field, import_database, name_list
from mineral_data import ens, cor, py, coe, q, fo, ky, sill, andal, per, sp, \
mao, fmao, stv, cc, arag, jeff, jeff_fe, jeff_fe3p, jeff_feb
import_database()
mpl.rcParams['figure.dpi']= 80
class latex_class():
"""
Setup for the use of LaTeX for axis labels and titles; sets of parameters
for graphics output.
"""
def __init__(self):
self.flag=False
self.dpi=300
self.font_size=14
self.tick_size=12
self.ext='jpg'
mpl.rc('text', usetex=False)
def on(self):
self.flag=True
mpl.rc('text', usetex=True)
def off(self):
self.flag=False
mpl.rc('text', usetex=False)
def set_param(self, dpi=300, fsize=14, tsize=12, ext='jpg'):
"""
Args:
dpi: resolution of the graphics file (default 300)
fsize: size of the labels of the axes in points (default 14)
tsize: size of the ticks in points (default 12)
ext: extension of the graphics file (default 'jpg'); this argument
is only used in those routines where the name of the file is
automatically produced by the program (e.g. check_poly or
check_spline functions). In other cases, the extension is
directly part of the name of the file given as argument to
the function itself, and 'ext' is ignored.
"""
self.dpi=dpi
self.font_size=fsize
self.tick_size=tsize
self.ext=ext
def get_dpi(self):
return self.dpi
def get_fontsize(self):
return self.font_size
def get_ext(self):
return self.ext
def get_tsize(self):
return self.tick_size
class flag:
def __init__(self,value):
self.value=value
self.jwar=0
def on(self):
self.value=True
def off(self):
self.value=False
def inc(self):
self.jwar += 1
def reset(self):
self.jwar=0
class verbose_class():
def __init__(self,value):
self.flag=value
def on(self):
self.flag=True
print("Verbose mode on")
def off(self):
self.flag=False
print("Verbose mode off")
class BM3_error(Exception):
pass
class vol_corr_class:
def __init__(self):
self.flag=False
self.v0_init=None
def on(self):
self.flag=True
def off(self):
self.flag=False
def set_volume(self,vv):
self.v0_init=vv
class data_info():
"""
Stores information about the current settings
"""
def __init__(self):
self.min_static_vol=None
self.max_static_vol=None
self.static_points=None
self.min_freq_vol=None
self.max_freq_vol=None
self.freq_points=None
self.min_select_vol=None
self.max_select_vol=None
self.select_points=None
self.freq_sets=None
self.fit_type='No fit'
self.min_vol_fit=None
self.max_vol_fit=None
self.fit_points=None
self.fit_degree=None
self.fit_smooth=None
self.k0=None
self.kp=None
self.v0=None
self.temp=None
self.k0_static=None
self.kp_static=None
self.v0_static=None
self.popt=None
self.popt_orig=None
self.min_names=name_list.mineral_names
self.title=None
def show(self):
"""
Prints information about the current settings stored in the classes
"""
if self.title !=None:
print(self.title)
print("\nCurrent settings and results\n")
if self.min_static_vol != None:
print("Static data ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_static_vol, self.max_static_vol, self.static_points))
if self.min_freq_vol != None:
print("Frequency volume range ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_freq_vol, self.max_freq_vol, self.freq_points))
if self.min_select_vol != None:
print("Selected freq. sets ** min, max volumes: %8.4f, %8.4f; points: %d"\
% (self.min_select_vol, self.max_select_vol, self.select_points))
print("Frequency sets: %s" % str(self.freq_sets))
if self.fit_type != 'No fit':
if self.fit_type=='poly':
print("\nFit of frequencies ** type: %s, degree: %d" \
% (self.fit_type, self.fit_degree))
else:
print("\nFit of frequencies ** type: %s, degree: %d, smooth: %2.1f" \
% (self.fit_type, self.fit_degree, self.fit_smooth))
print(" min, max volumes: %8.4f, %8.4f; points %d" %\
(self.min_vol_fit, self.max_vol_fit, self.fit_points))
else:
print("No fit of frequencies")
if supercell.flag:
print("\n*** This is a computation performed on SUPERCELL data")
print(" (SCELPHONO and QHA keywords in CRYSTAL). Number of cells: %3i" % supercell.number)
if self.k0_static != None:
print("\n*** Static EoS (BM3) ***")
print("K0: %6.2f GPa, Kp: %4.2f, V0: %8.4f A^3" %\
(self.k0_static, self.kp_static, self.v0_static))
if static_range.flag:
print("\n*** Static EoS is from a restricted volume range:")
print("Minimum volume: %8.3f" % static_range.vmin)
print("Maximum volume: %8.3f" % static_range.vmax)
if p_stat.flag:
print("\n*** Static EoS from P(V) data ***")
print("Data points num: %3i" % p_stat.npoints)
print("Volume range: %8.4f, %8.4f (A^3)" % (p_stat.vmin, p_stat.vmax))
print("Pressure range: %5.2f, %5.2f (GPa)" % (p_stat.pmax, p_stat.pmin))
print("EoS -- K0: %6.2f (GPa), Kp: %4.2f, V0: %8.4f (A^3)" % (p_stat.k0,\
p_stat.kp, p_stat.v0))
print("Energy at V0: %12.9e (hartree)" % p_stat.e0)
if self.k0 != None:
print("\n** BM3 EoS from the last computation, at the temperature of %5.2f K **" % self.temp)
print("K0: %6.2f GPa, Kp: %4.2f, V0: %8.4f A^3" %\
(self.k0, self.kp, self.v0))
if not f_fix.flag:
print("Kp not fixed")
else:
print("Kp fixed")
if exclude.ex_mode != []:
uniq=np.unique(exclude.ex_mode)
print("\nZone center excluded modes: %s" % str(uniq))
else:
print("\nNo zone center excluded modes")
if disp.ex_flag:
uniq=np.unique(disp.excluded_list)
print("Off center excluded modes: %s" % str(uniq))
else:
print("No off center excluded modes")
if kieffer.flag==True:
print("\nKieffer model on; frequencies %5.2f %5.2f %5.2f cm^-1" %\
(kieffer.kief_freq_inp[0], kieffer.kief_freq_inp[1], \
kieffer.kief_freq_inp[2]))
else:
print("\nKieffer model off")
if anharm.flag:
print("\nAnharmonic correction for mode(s) N. %s" % str(anharm.mode).strip('[]'))
print("Brillouin flag(s): %s" % str(anharm.brill).strip('[]'))
if disp.flag:
print("\n--------------- Phonon dispersion --------------------")
print("\nDispersion correction activated for the computation of entropy and")
print("specific heat:")
print("Number of frequency sets: %3i" % disp.nset)
if disp.nset > 1:
if disp.fit_type == 0:
print("Polynomial fit of the frequencies; degree: %3i " % disp.fit_degree)
else:
print("Spline fit of the frequencies; degree: %3i, smooth: %3.1f"\
% (disp.fit_degree, disp.fit_type))
print("Number of off-centered modes: %5i" % disp.f_size)
if disp.eos_flag:
print("\nThe phonon dispersion is used for the computation of the bulk modulus")
print("if the bulk_dir or the bulk_modulus_p functions are used, the latter")
print("in connection with the noeos option.")
if disp.fit_vt_flag:
print("The required V,T-fit of the free energy contribution from")
print("the off-centered modes is ready. Fit V,T-powers: %3i, %3i"
% (disp.fit_vt_deg_v, disp.fit_vt_deg_t))
else:
print("The required V,T-fit of the free energy contribution from")
print("the off-centered mode is NOT ready.")
else:
print("\nThe phonon dispersion correction is not used for the computation")
print("of the bulk modulus")
if disp.thermo_vt_flag & (disp.nset > 1):
print("\nVT-phonon dispersion correction to the thermodynamic properties")
elif (not disp.thermo_vt_flag) & (disp.nset > 1):
print("\nT-phonon dispersion correction to the thermodynamic properties")
print("Use disp.thermo_vt_on() to activate the V,T-correction")
print("\n --------------------------------------------------------")
if lo.flag:
out_lo=(lo.mode, lo.split)
df_out=pd.DataFrame(out_lo, index=['Mode', 'Split'])
df_out=df_out.T
df_out['Mode']=np.array([int(x) for x in df_out['Mode']], dtype=object)
print("\nFrequencies corrected for LO-TO splitting.\n")
if verbose.flag:
print(df_out.to_string(index=False))
print("---------------------------------------------")
print("\n**** Volume driver for volume_dir function ****")
print("Delta: %3.1f; degree: %2i; left: %3.1f; right: %3.1f, Kp_fix: %s; t_max: %5.2f"\
% (volume_ctrl.delta, volume_ctrl.degree, volume_ctrl.left, volume_ctrl.right,\
volume_ctrl.kp_fix, volume_ctrl.t_max))
print("EoS shift: %3.1f; Quad_shrink: %2i; T_dump: %3.1f; Dump fact.: %2.1f, T_last %4.1f" % \
(volume_ctrl.shift, volume_ctrl.quad_shrink, volume_ctrl.t_dump, volume_ctrl.dump,\
volume_ctrl.t_last))
print("Upgrade shift: %r" % volume_ctrl.upgrade_shift)
print("\n**** Volume driver for volume_from_F function ****")
print("In addition to the attributes set in the parent volume_control_class:")
print("shift: %3.1f, flag: %r, upgrade_shift: %r" % (volume_F_ctrl.get_shift(), \
volume_F_ctrl.get_flag(), volume_F_ctrl.get_upgrade_status()))
print("\n**** Numerical T-derivatives driver class (delta_ctrl) ****")
if not delta_ctrl.adaptive:
print("Delta: %3.1f" % delta_ctrl.delta)
print("Degree: %3i" % delta_ctrl.degree)
print("N. of points %3i" % delta_ctrl.nump)
else:
print("Adaptive scheme active:")
print("T_min, T_max: %4.1f, %6.1f K" % (delta_ctrl.tmin, delta_ctrl.tmax))
print("Delta_min, Delta_max: %4.1f, %6.1f K" % (delta_ctrl.dmin, delta_ctrl.dmax))
print("Degree: %3i" % delta_ctrl.degree)
print("N. of points %3i" % delta_ctrl.nump)
if verbose.flag:
print("\n--------- Database section ---------")
print("Loaded phases:")
print(self.min_names)
class exclude_class():
"""
Contains the list of modes to be excluded from the
calculation of the Helmholtz free energy.
It can be constructed by using the keyword EXCLUDE
in the input.txt file.
"""
def __init__(self):
self.ex_mode=[]
self.ex_mode_keep=[]
self.flag=False
def __str__(self):
return "Excluded modes:" + str(self.ex_mode)
def add(self,modes):
"""
Args:
modes: a single mode (int) or a list of modes to be excluded
"""
if type(modes) is list:
self.ex_mode.extend(modes)
self.flag=True
elif type(modes) is int:
self.ex_mode.append(modes)
self.flag=True
else:
print("** Warning ** exclude.add(): invalid input type")
return
def restore(self):
"""
Restores all the excluded modes
"""
if self.flag:
self.ex_mode_keep=self.ex_mode
self.ex_mode=[]
self.flag=False
def on(self):
self.ex_mode=self.ex_mode_keep
self.flag=True
class fix_flag:
def __init__(self,value=0.):
self.value=value
self.flag=False
def on(self,value=4):
self.value=value
self.flag=True
def off(self):
self.value=0.
self.flag=False
class fit_flag:
def __init__(self):
pass
def on(self):
self.flag=True
def off(self):
self.flag=False
class spline_flag(fit_flag):
"""
Sets up the spline fit of the frequencies as functions of
the volume of the unit cell.
Several variables are defined:
1. flag: (boolean); if True, frequencies are fitted with splines
2. degree: degree of the spline
3. smooth: *smoothness* of the spline
4. flag_stack: (boolean) signals the presence of the spline stack
5. pol_stack: it is the stack containing parameters for the spline fit
Note:
The spline stack can be set up and initialized by using the keyword\
SPLINE under the keyword FITVOL in the *input.txt* file
Methods:
"""
def __init__(self,flag=False,degree=3,smooth=0):
super().__init__()
self.flag=flag
self.flag_stack=False
self.degree=degree
self.smooth=smooth
self.pol_stack=np.array([])
def on(self):
super().on()
def off(self):
super().off()
def set_degree(self,degree):
self.degree=int(degree)
def set_smooth(self,smooth):
self.smooth=smooth
def stack(self):
self.pol_stack=freq_stack_spline()
self.flag_stack=True
def vol_range(self,v_ini, v_fin, npoint):
self.fit_vol=np.linspace(v_ini, v_fin, npoint)
class poly_flag(fit_flag):
def __init__(self,flag=False,degree=2):
super().__init__()
self.flag=flag
self.flag_stack=False
self.degree=degree
self.pol_stack=np.array([])
def on(self):
super().on()
def off(self):
super().off()
def set_degree(self,degree):
self.degree=int(degree)
def stack(self):
self.pol_stack=freq_stack_fit()
self.flag_stack=True
def vol_range(self,v_ini, v_fin, npoint):
self.fit_vol=np.linspace(v_ini, v_fin, npoint)
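# Usage sketch for the poly_flag/spline_flag drivers (these are normally set up
# through the FITVOL keyword in input.txt; the volume range below is hypothetical):
#   >>> flag_poly.on()
#   >>> flag_poly.set_degree(4)
#   >>> flag_poly.vol_range(110., 122., 16)   # 16 volumes between 110 and 122 A^3
#   >>> flag_poly.stack()                     # fit the frequencies on that V grid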
class kieffer_class():
def __str__(self):
return "Application of the Kieffer model for acoustic phonons"
def __init__(self,flag=False):
self.flag=flag
self.stack_flag=False
self.kief_freq=None
self.kief_freq_inp=None
self.t_range=None
self.f_list=None
self.input=False
def stack(self, t_range, f_list):
self.t_range=t_range
self.f_list=f_list
def get_value(self,temperature):
free=scipy.interpolate.interp1d(self.t_range, self.f_list, kind='quadratic')
return free(temperature)*zu
def on(self):
self.flag=True
print("Kieffer correction on")
if disp.flag:
disp.flag=False
print("Phonon dispersion is deactivated")
if not self.stack_flag:
free_stack_t(pr.kt_init,pr.kt_fin,pr.kt_points)
def off(self):
self.flag=False
print("Kieffer correction off")
def freq(self,f1,f2,f3):
self.kief_freq_inp=np.array([f1, f2, f3])
self.kief_freq=self.kief_freq_inp*csl*h/kb
free_stack_t(pr.kt_init,pr.kt_fin,pr.kt_points)
def plot(self):
plt.figure()
plt.plot(self.t_range, self.f_list, "k-")
plt.xlabel("Temperature (K)")
plt.ylabel("F free energy (J/mol apfu)")
plt.title("Free energy from acustic modes (Kieffer model)")
plt.show()
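# Usage sketch for the Kieffer acoustic model (the frequencies in cm^-1 are
# hypothetical; the same setup is normally done through the KIEFFER keyword):
#   >>> kieffer.freq(100., 150., 200.)   # acoustic frequencies at the zone boundary
#   >>> kieffer.on()                     # activate the correction (stacks F(T) if needed)
#   >>> kieffer.plot()                   # plot the stacked acoustic free energy F(T)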
class bm4_class():
"""
Setup and information for a 4th-order Birch-Murnaghan EoS (BM4)
It provides:
1. energy: function; Volume integrated BM4 (V-BM4)
2. pressure: function; BM4
3. bm4_static_eos: BM4 parameters for the static energy
calculation as a function of V
4. en_ini: initial values for the BM4 fit
5. bm4_store: BM4 parameters from a fitting at a given
temperature
methods:
"""
def __init__(self):
self.flag=False
self.start=True
self.energy=None
self.pressure=None
self.en_ini=None
self.bm4_static_eos=None
self.bm4_store=None
def __str__(self):
return "BM4 setting: " + str(self.flag)
def on(self):
"""
Switches on the BM4 calculation
"""
self.flag=True
if self.start:
self.energy, self.pressure=bm4_def()
self.start=False
def estimates(self,v4,e4):
"""
Estimates initial values of BM4 parameters for the fit
"""
ini=init_bm4(v4,e4,4.0)
new_ini,dum=curve_fit(v_bm3, v4, e4, \
p0=ini,ftol=1e-15,xtol=1e-15)
kpp=(-1/new_ini[1])*((3.-new_ini[2])*\
(4.-new_ini[2])+35./9.)*1e-21/conv
self.en_ini=[new_ini[0], new_ini[1],\
new_ini[2], kpp, new_ini[3]]
k0_ini=new_ini[1]*conv/1e-21
print("\nBM4-EoS initial estimate:")
print("\nV0: %6.4f" % self.en_ini[0])
print("K0: %6.2f" % k0_ini)
print("Kp: %6.2f" % self.en_ini[2])
print("Kpp: %6.2f" % self.en_ini[3])
print("E0: %8.6e" % self.en_ini[4])
def store(self,bm4st):
"""
Stores BM4 parameters from a fit at a given temperature
"""
self.bm4_store=bm4st
def upload(self,bm4_eos):
"""
Loads the parameters from the static calculation
(that are then stored in bm4_static_eos)
"""
self.bm4_static_eos=bm4_eos
def upgrade(self):
"""
Uses the stored values of parameters [from the application of
store()] to upgrade the initial estimation done with estimates()
"""
self.en_ini=self.bm4_store
def off(self):
"""
Switches off the BM4 calculation
"""
self.flag=False
def status(self):
"""
Informs on the status of BM4 (on, or off)
"""
print("\nBM4 setting: %s " % self.flag)
class gamma_class():
"""
Store coefficients of a gamma(T) fit
"""
def __init__(self):
self.flag=False
self.degree=1
self.pol=np.array([])
def upload(self,deg,pcoef):
self.flag=True
self.degree=deg
self.pol=pcoef
class super_class():
"""
Store supercell data: number of cells on which the frequencies
computation was done. To be used in connection with CRYSTAL
calculations performed with SCELPHONO and QHA keywords.
Default value: 1
"""
def __init__(self):
self.number=1
self.flag=False
def set(self,snum):
self.flag=True
self.number=snum
print("\n*** Supercell *** Number of cells: %3i" % snum)
def reset(self):
self.flag=False
self.number=1
print("\n*** Supercell deactivated *** Number of cells set to 1")
class lo_class():
"""
LO/TO splitting correction.
The class stores a copy of the original TO frequencies, the modes
affected by LO/TO splitting and the splitting values.
Modes are identified by their progressive number (starting from 0) stored
in the *mode* attribute.
When the correction is activated, new values of frequencies (*f_eff*)
are computed for the relevant modes, according to the formula:
f_eff = 2/3 f_TO + 1/3 f_LO
where f_LO = f_TO + split.
Correction is activated by the keyword LO in the input.txt file,
followed by the name of the file containing the splitting data (two
columns: mode number and the corresponding split in cm^-1).
Internally, the methods *on* and *off* switch respectively on and off
the correction. The method *apply* does the computation of the frequencies
*f_eff*.
"""
def __init__(self):
self.flag=False
self.mode=np.array([])
self.split=np.array([])
self.data_freq_orig=np.array([])
self.data_freq=np.array([])
def on(self):
self.apply()
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
self.flag=True
print("Frequencies corrected for LO-TO splitting")
def off(self):
self.flag=False
self.data_freq=np.copy(self.data_freq_orig)
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
print("LO-TO splitting not taken into account")
def apply(self):
for ifr in np.arange(lo.mode.size):
im=lo.mode[ifr]
for iv in int_set:
freq_lo=self.data_freq_orig[im,iv+1]+self.split[ifr]
self.data_freq[im,iv+1]=(2./3.)*self.data_freq_orig[im,iv+1]\
+(1./3.)*freq_lo
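# Worked example of the LO-TO correction applied in lo_class.apply (numbers are
# illustrative only): with f_TO = 1000 cm^-1 and split = 30 cm^-1,
# f_LO = 1000 + 30 = 1030 cm^-1 and f_eff = (2/3)*1000 + (1/3)*1030 = 1010 cm^-1.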
class anh_class():
def __init__(self):
self.flag=False
self.disp_off=0
def off(self):
self.flag=False
exclude.restore()
if disp.input_flag:
disp.free_exclude_restore()
print("Anharmonic correction is turned off")
print("Warning: all the excluded modes are restored")
def on(self):
self.flag=True
self.flag_brill=False
for im, ib in zip(anharm.mode, anharm.brill):
if ib == 0:
exclude.add([im])
elif disp.input_flag:
disp.free_exclude([im])
self.flag_brill=True
if self.flag_brill:
disp.free_fit_vt()
print("Anharmonic correction is turned on")
class static_class():
"""
Defines the volume range for the fit of the static EoS
If not specified (default) such range is defined from the
volumes found in the static energies file.
"""
def __init__(self):
self.flag=False
def set(self, vmin, vmax):
"""
Sets the minimum and maximum volumes for the V-range
Args:
vmin: minimum volume
vmax: maximum volume
"""
self.vmin=vmin
self.vmax=vmax
def off(self):
"""
Restores the original V-range (actually, it switches off the volume
selection for the fit of the static EoS)
"""
self.flag=False
def on(self):
"""
It switches on the volume selection for the fit of the static EoS
Note:
The minimum and maximum V-values are set by the 'set' method
of the class
"""
self.flag=True
class p_static_class():
def __init__(self):
self.flag=False
self.vmin=None
self.vmax=None
self.pmin=None
self.pmax=None
self.npoints=None
self.k0=None
self.kp=None
self.v0=None
self.e0=None
class volume_control_class():
"""
Defines suitable parameters for the volume_dir function
"""
def __init__(self):
self.degree=2
self.delta=2.
self.t_max=500.
self.shift=0.
self.t_dump=0.
self.dump=1.
self.quad_shrink=4
self.kp_fix=False
self.debug=False
self.upgrade_shift=False
self.skew=1.
self.t_last=0.
self.t_last_flag=False
self.v_last=None
def set_degree(self, degree):
"""
Sets the degree of the polynomial used to fit the (P(V)-P0)^2 data.
The fitted curve is then minimized to get the equilibrium volume
at each T and P.
For each single parameter relevant in this class, there exists
a specific method to set its value. The method set_all can be used to
set several of them at the same time, by using the appropriate
keywords as arguments. The arguments to set_all are:
Args:
degree: degree of the fitting polynomial (default=2)
delta: volume range where the minimum of the fitting function
is to be searched (default=2.)
skew: the Volume range is centered around the equilibrium
volume approximated by the EoS-based new_volume function
The symmetry around such point can be controlled by
the skew parameter (default=1.: symmetric interval)
shift: Systematic shift from the new_volume estimation (default=0.)
t_max: In the initial estimation of the volume at P/T with the EoS-based
new_volume function, the Kp is refined if T < t_max.
If T > t_max and kp_fix=True, Kp is fixed at the value
refined at t_max (default=500K)
kp_fix: See t_max (default=True)
quad_shrink: if degree=2, it restricts the volume range around the
approximated volume found. The new range is
delta/quad_shrink (default=4)
upgrade_shift: at the end of the computation, the difference between
the volume found and the initial one (from the EoS-
based new_volume function) is calculated. The shift
attribute is then upgraded if upgrade_shift is True
(default=False)
debug: if True, the (P(V)-P0)**2 function is plotted as a function
of V (default=False)
t_dump: temperature above which a damping of the shift parameter is
applied (default=0.)
dump: damping factor applied to the shift parameter (shift=shift/dump; default=1.)
t_last: if t_last > 10., the last volume computed is used as the
initial guess value (vini) for the next computation at a
new temperature.
"""
self.degree=degree
def set_delta(self, delta):
self.delta=delta
def set_tmax(self,tmax):
self.t_max=tmax
def set_skew(self, skew):
self.left=skew+1
self.right=(skew+1)/skew
def kp_on(self):
self.kp_fix=True
def kp_off(self):
self.kp_fix=False
def debug_on(self):
self.debug=True
def debug_off(self):
self.debug=False
def set_shift(self, shift):
self.shift=shift
def upgrade_shift_on(self):
self.upgrade_shift=True
def upgrade_shift_off(self):
self.upgrade_shift=False
def set_shrink(self, shrink):
self.quad_shrink=shrink
def shift_reset(self):
self.shift=0.
def set_t_dump(self,t_dump=0., dump=1.0):
self.t_dump=t_dump
self.dump=dump
def set_t_last(self, t_last):
self.t_last=t_last
def set_all(self,degree=2, delta=2., skew=1., shift=0., t_max=500.,\
quad_shrink=4, kp_fix=True, upgrade_shift=False, debug=False,\
t_dump=0., dump=1., t_last=0.):
self.degree=degree
self.delta=delta
self.t_max=t_max
self.kp_fix=kp_fix
self.debug=debug
self.left=skew+1
self.right=(skew+1)/skew
self.shift=shift
self.quad_shrink=quad_shrink
self.upgrade_shift=upgrade_shift
self.skew=skew
self.t_last=t_last
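# Usage sketch for the volume_dir driver (values are illustrative; the defaults
# set in __init__ are usually adequate):
#   >>> volume_ctrl.set_all(degree=4, delta=3., kp_fix=True, t_max=600.)
#   >>> volume_ctrl.upgrade_shift_on()   # reuse the computed shift as the new starting shift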
class volume_F_control_class():
"""
Class controlling some parameters relevant for the computation of
volume and thermal expansion by using the volume_from_F function.
Precisely, the initial volume (around which the refined volume vref
is to be searched) is set to vini+shift, where vini is the
output from the volume_dir, whereas shift is from this class.
Shift is computed as the difference vref-vini; it can be upgraded
provided the flag upgrade_shift is set to True.
"""
def __init__(self):
self.shift=0.
self.upgrade_shift=False
self.flag=False
def on(self):
self.flag=True
def off(self):
self.flag=False
def set_shift(self, sh):
self.shift=sh
def upgrade_on(self):
self.upgrade_shift=True
def upgrade_off(self):
self.upgrade_shift=False
def get_shift(self):
return self.shift
def get_upgrade_status(self):
return self.upgrade_shift
def get_flag(self):
return self.flag
class delta_class():
"""
Control parameters for the numerical evaluation of the first and second
derivatives of the Helmholtz free energy as a function of T. They are
relevant for the entropy_v function that computes both the entropy and
specific heat at a fixed volume, as well as the computation of thermal
expansion.
Initial values of delta, degree and number of points are read
from the parameters file 'parame.py'
New values can be set by the methods set_delta, set_degree and set_nump
of the class. Values can be retrieved by the corresponding 'get' methods.
The reset method sets the default values.
An adaptive scheme is activated by the method adaptive_on (adaptive_off
deactivates the scheme). In this case the delta value is computed as a function
of temperature (T). Precisely:
delta=delta_min+(T-t_min)*(delta_max-delta_min)/(t_max-t_min)
delta=delta_min if T < t_min
delta=delta_max if T > t_max
The parameters t_min, t_max, delta_min and delta_max can be set by the
adaptive_set method (default values 50, 1000, 10, 50, respectively)
"""
def __init__(self):
self.delta=pr.delta
self.nump=pr.nump
self.degree=pr.degree
self.adaptive=False
self.tmin=50.
self.tmax=1000.
self.dmin=10.
self.dmax=50.
def adaptive_on(self):
self.adaptive=True
def adaptive_off(self):
self.adaptive=False
def adaptive_set(self, tmin=50., tmax=1000., dmin=10., dmax=50.):
self.tmin=tmin
self.tmax=tmax
self.dmin=dmin
self.dmax=dmax
def set_delta(self,delta):
self.delta=delta
print("Delta T value, for the computation of entropy, Cv and thermal expansion set to %4.1f" \
% self.delta)
def set_degree(self,degree):
self.degree=degree
print("Degree for the computation of entropy, Cv and thermal expansion set to %3i" \
% self.degree)
def set_nump(self,nump):
self.nump=nump
print("N. points for the computation of entropy, Cv and thermal expansion set to %3i" \
% self.nump)
def get_delta(self, tt=300):
if not self.adaptive:
return self.delta
else:
if tt < self.tmin:
return self.dmin
elif tt > self.tmax:
return self.dmax
else:
return self.dmin+((tt-self.tmin)/(self.tmax-self.tmin))*(self.dmax-self.dmin)
def get_degree(self):
return self.degree
def get_nump(self):
return self.nump
def reset(self):
self.delta=pr.delta
self.degree=pr.degree
self.nump=pr.nump
print("\nDefault parameters for the computation of entropy, Cv and thermal expansion:")
print("Delta: %3.1f" % self.delta)
print("Degree: %3i" % self.degree)
print("Num. points: %3i" % self.nump)
class disp_class():
"""
Sets up the computation for the inclusion of phonon dispersion effects
in the EoS computation or in the calculation of all the thermodynamic
properties.
The class is relevant and activated if the DISP keyword is contained
in the input.txt input file.
Dispersion effects can be switched on or off by using the on() and off()
methods.
Note:
To apply the phonon dispersion correction to computation of an equation
of state, the method eos_on() must be invoked [the method eos_off() switches
it off]. In this case, more than one volume must be present in the input
file for dispersion.
Note:
If phonon frequencies are computed for several values of the unit cell volume,
in order to apply a VT-phonon dispersion correction to thermodynamic properties,
the method thermo_vt_on() must be invoked [the method thermo_vt_off() switches it off].
Otherwise, a T-only phonon dispersion correction is applied (it is assumed that
phonon frequencies do not change with volume).
Note:
The method free_fit_vt() must be used to get the F(V,T) function for
off-center phonon modes.
"""
def __init__(self):
self.input_flag=False
self.flag=False
self.eos_flag=False
self.thermo_vt_flag=False
self.freq=None
self.deg=None
self.fit_type=None
self.input=False
self.fit_vt_flag=False
self.fit_vt=None
self.temp=None
self.error_flag=False
self.ex_flag=False
self.free_min_t=10.
self.fit_vt_deg_t=4
self.fit_vt_deg_v=4
self.fit_t_deg=6
self.free_nt=24
self.free_disp=True
def on(self):
self.flag=True
if anharm.disp_off > 0:
anharm.mode=np.copy(anharm.mode_orig)
anharm.brill=np.copy(anharm.brill_orig)
anharm.nmode=anharm.nmode_orig
print("Dispersion correction activated")
if kieffer.flag:
kieffer.flag=False
print("Kieffer correction is deactivated")
def off(self):
self.flag=False
print("Dispersion correction off")
if anharm.flag:
mode_a=np.array([])
mode_b=np.array([])
for ia, ib in zip(anharm.mode, anharm.brill):
if ib == 1:
print("\nWarning: the anharmonic mode n. %2i has Brillouin flag" % ia)
print("equal to 1; it should not be considered if the dispersion")
print("correction is deactivated.\n")
anharm.disp_off=anharm.disp_off+1
else:
mode_a=np.append(mode_a, ia)
mode_b=np.append(mode_b, ib)
if anharm.disp_off == 1:
anharm.nmode_orig=anharm.nmode
anharm.mode_orig=np.copy(anharm.mode)
anharm.brill_orig=np.copy(anharm.brill)
anharm.nmode=mode_a.size
anharm.mode=np.copy(mode_a)
anharm.brill=np.copy(mode_b)
print("List of anharmonic modes considered: %s" % anharm.mode)
def eos_on(self):
if self.flag :
if not self.error_flag:
self.eos_flag=True
print("\nPhonon dispersion correction for bulk_dir or bulk_modulus_p computations")
else:
print("Only 1 volume found in the 'disp' files; NO disp_eos possible")
else:
if self.input_flag:
print("Phonon dispersion is not on; use disp.on() to activate")
else:
print("No input of dispersion data; eos_on ignored")
def eos_off(self):
self.eos_flag=False
print("No phonon dispersion correction for bulk_dir computation")
def thermo_vt_on(self):
if self.nset > 1:
self.thermo_vt_flag=True
print("VT-dispersion correction of thermodynamic properties\n")
if not self.fit_vt_flag:
self.free_fit_vt()
else:
print("One volume only found in the DISP file")
def thermo_vt_off(self):
self.thermo_vt_flag=False
print("T-dispersion correction of thermodynamic properties")
print("No volume dependence considered")
def freq_spline_fit(self):
"""
Requests and performs spline fits of the frequencies of the off-center
modes as functions of volume.
Relevant parameters for the fit (degree and smooth parameters) are
specified in the appropriate input file.
"""
self.spline=np.array([])
ord_vol=list(np.argsort(self.vol))
vol = [self.vol[iv] for iv in ord_vol]
for ifr in np.arange(self.f_size):
freq=self.freq[:,ifr]
freq=[freq[iv] for iv in ord_vol]
ifit=UnivariateSpline(vol, freq, k=self.fit_degree, s=self.fit_type)
self.spline=np.append(self.spline, ifit)
def freq_fit(self):
"""
Requests and performs polynomial fits of the frequencies of the off-center
modes as functions of volume.
The relevant parameter for the fit (degree) is specified in the
appropriate input file.
"""
self.poly=np.array([])
for ifr in np.arange(self.f_size):
if self.nset > 1:
freq=self.freq[:,ifr]
ifit=np.polyfit(self.vol, freq, self.fit_degree)
self.poly=np.append(self.poly,ifit)
else:
self.poly=np.append(self.poly, (0, self.freq[:,ifr][0]))
if self.nset == 1:
self.poly=self.poly.reshape(self.f_size,2)
else:
self.poly=self.poly.reshape(self.f_size,self.fit_degree+1)
def freq_func(self,ifr,vv):
fit=self.poly[ifr]
return np.polyval(fit,vv)
def freq_spline_func(self,ifr,vv):
fit=self.spline[ifr](vv)
return fit.item(0)
def check(self,ifr):
"""
Checks the quality of the frequency fit for a specified mode
Args:
ifr: sequence number of the mode to be checked
"""
v_list=np.linspace(np.min(disp.vol), np.max(disp.vol),40)
if self.fit_type == 0:
f_list=[self.freq_func(ifr,iv) for iv in v_list]
else:
f_list=[self.freq_spline_func(ifr,iv) for iv in v_list]
tlt="Check fit for mode N. "+ str(ifr)
plt.figure()
plt.plot(v_list,f_list, "k-")
plt.plot(disp.vol, disp.freq[:,ifr],"b*")
plt.xlabel("Volume (A^3)")
plt.ylabel("Frequency (cm^-1)")
plt.title(tlt)
plt.show()
def check_multi(self, fr_l):
"""
Checks the quality of the frequency fit for a list of modes
Args:
fr_l: list of sequence numbers of the various modes to be checked
Example:
>>> disp.check_multi([0, 1, 2, 3])
>>> disp.check_multi(np.arange(10))
"""
for ifr in fr_l:
self.check(ifr)
def free_exclude(self,ex_list):
"""
Excludes the indicated off-center modes from the computation of the
free energy
Args:
ex_list: list of modes to be excluded
Note:
Even a single excluded mode must be specified as a list; for instance
disp.free_exclude([0])
Note:
after the exclusion of some modes, the F(V,T) function has
to be recomputed by the free_fit_vt method
"""
if not self.input_flag:
print("no input of dispersion data")
return
self.ex_flag=True
self.excluded_list=ex_list
print("Off center modes excluded: ", self.excluded_list)
print("Compute a new disp.free_fit_vt surface")
def free_exclude_restore(self):
"""
The excluded modes are restored
"""
self.ex_flag=False
print("All off centered mode restored")
print("Compute a new disp.free_fit_vt surface")
def free(self,temp,vv):
nf_list=np.arange(self.f_size)
if self.fit_type == 0:
freq=(self.freq_func(ifr,vv) for ifr in nf_list)
else:
freq=(self.freq_spline_func(ifr,vv) for ifr in nf_list)
d_deg=self.deg
wgh=self.w_list
enz=0.
fth=0.
idx=0
nfreq=0
for ifr in freq:
if not self.ex_flag:
nfreq=nfreq+1
fth=fth+d_deg[idx]*np.log(1-np.e**(ifr*e_fact/temp))*wgh[idx]
enz=enz+d_deg[idx]*ifr*ez_fact*wgh[idx]
else:
if not (idx in self.excluded_list):
nfreq=nfreq+1
fth=fth+d_deg[idx]*np.log(1-np.e**(ifr*e_fact/temp))*wgh[idx]
enz=enz+d_deg[idx]*ifr*ez_fact*wgh[idx]
idx=idx+1
return enz+fth*kb*temp/conv
def free_fit(self,mxt,vv,disp=True):
fit_deg=self.fit_t_deg
nt=24
nt_plot=50
tl=np.linspace(10,mxt,nt)
free=np.array([])
for it in tl:
ifree=self.free(it,vv)
free=np.append(free,ifree)
fit=np.polyfit(tl,free,fit_deg)
self.fit=fit
if disp:
tl_plot=np.linspace(10,mxt,nt_plot)
free_plot=self.free_func(tl_plot)
print("Phonon dispersion correction activated")
print("the contribution to the entropy and to the")
print("specific heat is taken into account.\n")
if verbose.flag:
plt.figure()
plt.plot(tl,free,"b*",label="Actual values")
plt.plot(tl_plot, free_plot,"k-",label="Fitted curve")
plt.legend(frameon=False)
plt.xlabel("T (K)")
plt.ylabel("F (a.u.)")
plt.title("Helmholtz free energy from off-centered modes")
plt.show()
def free_fit_ctrl(self, min_t=10., t_only_deg=4, degree_v=4, degree_t=4, nt=24, disp=True):
"""
Free fit driver: sets the relevant parameters for the fit computation
of the F(V,T) function, on the values of F calculated on a grid
of V and T points.
Args:
min_t: minimum temperature for the construction of the
VT grid (default=10.)
degree_v: maximum degree of V terms of the surface (default=4)
degree_t: maximum degree of T terms of the surface (default=4)
t_only_deg: degree of the T polynomial for a single volume
phonon dispersion (default=4)
nt: number of points along the T axis for the definition of the
grid (default=24)
disp: if True, a plot of the surface is shown (default=True)
Note:
The method does not execute the fit, but it defines the most
important parameters. The fit is done by the free_fit_vt() method.
Note:
the volumes used for the construction of the VT grid are those
provided in the appropriate input file. They are available
in the disp.vol variable.
"""
self.free_min_t=min_t
self.fit_t_deg=t_only_deg
self.fit_vt_deg_t=degree_t
self.fit_vt_deg_v=degree_v
self.free_nt=nt
self.free_disp=disp
if self.input_flag:
self.free_fit_vt()
self.free_fit(self.temp,self.vol[0])
def set_tmin(self,tmin):
self.free_min_t=tmin
def set_nt(self,nt):
self.free_nt=nt
def free_fit_vt(self):
self.fit_vt_flag=True
min_t=self.free_min_t
nt=self.free_nt
disp=self.free_disp
deg_t=self.fit_vt_deg_t
deg_v=self.fit_vt_deg_v
max_t=self.temp
pvv=np.arange(deg_v+1)
ptt=np.arange(deg_t+1)
p_list=np.array([],dtype=int)
maxvt=np.max([deg_v, deg_t])
for ip1 in np.arange(maxvt+1):
for ip2 in np.arange(maxvt+1):
i1=ip2
i2=ip1-ip2
if i2 < 0:
break
ic=(i1, i2)
if (i1 <= deg_v) and (i2 <= deg_t):
p_list=np.append(p_list,ic)
psize=p_list.size
pterm=int(psize/2)
self.p_list=p_list.reshape(pterm,2)
x0=np.ones(pterm)
t_list=np.linspace(min_t,max_t,nt)
v_list=self.vol
nv=len(v_list)
if nv == 1:
print("\n**** WARNING ****\nOnly one volume found in the 'disp' data files;")
print("NO V,T-fit of F is possible")
self.eos_off()
self.error_flag=True
return
free_val=np.array([])
for it in t_list:
for iv in v_list:
ifree=self.free(it,iv)
free_val=np.append(free_val,ifree)
free_val=free_val.reshape(nt,nv)
vl,tl=np.meshgrid(v_list,t_list)
vl=vl.flatten()
tl=tl.flatten()
free_val=free_val.flatten()
fit, pcov = curve_fit(self.free_vt_func, [vl, tl], free_val, p0 = x0)
self.fit_vt=fit
error=np.array([])
for it in t_list:
for iv in v_list:
f_calc=self.free_vt(it,iv)
f_obs=self.free(it,iv)
ierr=(f_calc-f_obs)**2
error=np.append(error,ierr)
mean_error=np.sqrt(np.mean(error))
max_error=np.sqrt(np.max(error))
print("V,T-fit of the Helmholtz free energy contribution from the off-centered modes")
print("V, T powers of the fit: %3i %3i" % (self.fit_vt_deg_v, self.fit_vt_deg_t))
print("Mean error: %5.2e" % mean_error)
print("Maximum error: %5.2e" % max_error)
if self.ex_flag:
print("Excluded modes: ", self.excluded_list)
if disp:
t_plot=np.linspace(min_t,max_t,40)
v_plot=np.linspace(np.min(vl),np.max(vl),40)
v_plot,t_plot=np.meshgrid(v_plot,t_plot)
v_plot=v_plot.flatten()
t_plot=t_plot.flatten()
h_plot=self.free_vt_func([v_plot, t_plot], *fit)
h_plot=h_plot.reshape(40,40)
v_plot=v_plot.reshape(40,40)
t_plot=t_plot.reshape(40,40)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111,projection='3d', )
ax.scatter(tl,vl,free_val,c='r')
ax.plot_surface(t_plot, v_plot, h_plot)
ax.set_xlabel("Temperature", labelpad=7)
ax.set_ylabel("Volume", labelpad=7)
ax.set_zlabel('F(T,V)', labelpad=8)
plt.show()
def free_vt_func(self,data,*par):
vv=data[0]
tt=data[1]
nterm=self.p_list.shape[0]
func=0.
for it in np.arange(nterm):
pv=self.p_list[it][0]
pt=self.p_list[it][1]
func=func+par[it]*(vv**pv)*(tt**pt)
return func
def free_vt(self,temp,volume):
return self.free_vt_func([volume,temp],*self.fit_vt)
def free_func(self,temp):
free_disp=np.polyval(self.fit,temp)
return free_disp
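# Usage sketch for the phonon dispersion driver (data are normally loaded via
# the DISP keyword in input.txt or with the load_disp function):
#   >>> disp.free_fit_ctrl(min_t=10., degree_v=4, degree_t=4, nt=24, disp=False)
#   >>> disp.thermo_vt_on()           # V,T-correction of the thermodynamic properties
#   >>> disp.eos_on()                 # use the correction also in bulk_dir/bulk_modulus_p
#   >>> disp.check_multi([0, 1, 2])   # inspect the frequency fits of the first modes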
class volume_delta_class():
"""
Defines a suitable V range for the numerical evaluation of the
derivatives of any quantity with respect to V.
The V-range (delta) is obtained by multiplying the static equilibrium
volume (V0; which is computed by the static function) with a factor read
from the parame.py parameters' file; such parameter (frac) is stored
in the vd.frac variable and can also be set by the set_frac method.
The method set_delta computes delta, provided a volume is input.
When delta is computed, the vd.flag is set to True and its value
is used in several functions computing derivatives. On the contrary,
if vd.flag is set to False (use the method off), the delta
value is read from the parameters' file (pr.delta_v).
"""
def __init__(self):
self.v0=None
self.flag=False
self.delta=None
self.frac=pr.v_frac
def set_delta(self,vol=0.):
"""
Sets the V-delta value for the calculation of derivatives with
respect to V.
Args:
vol: if vol > 0.1, computes delta for the volume vol;
if vol < 0.1, vol is set to the default value stored
in the v0 variable.
"""
if vol < 0.1:
if self.v0 is not None:
self.flag=True
self.delta=self.frac*self.v0
else:
war1="Warning: No volume provided for the set_delta method\n"
war2=" The delta value is read from the parameters file"
war=war1+war2+": %5.4f"
print(war % pr.delta_v)
self.flag=False
else:
self.delta=vol*self.frac
self.flag=True
self.v0=vol
def set_frac(self,frac):
self.frac=frac
def on(self):
self.flag=True
def off(self):
self.flag=False
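# Usage sketch for the V-delta driver (the volume value is hypothetical):
#   >>> vd.set_delta(120.)   # delta = frac*120 is then used for numerical dV derivatives
#   >>> vd.off()             # fall back to the fixed pr.delta_v value from parame.py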
class thermal_expansion_class():
"""
Interface for the computation of thermal expansion by different algorithms.
The method 'compute' performs the calculation by calling different functions
according to the 'method' keyword. Similarly, the method 'compute_serie'
performs the calculation of alpha as a function of temperature.
Several default parameters for the calculation are provided, which can
be set by the method 'set'.
The algorithms which are currently implemented can be listed by the method
'info'.
The 'compute_serie' method performs the calculation of the thermal
expansion in a given T-range and, optionally, performs a power
series fit on the computed values. Data from the fit can optionally be
loaded in the internal database if a phase name is provided.
Note:
For the method 'k_alpha_eos', this class uses a specialized
plotting function from the plot.py module, whose parameters are
controlled by the plot.set_param method.
"""
def __init__(self):
self.method='k_alpha_dir'
self.nt=12
self.fix=0
self.fit=False
self.tex=False
self.save=False
self.phase=''
self.title=True
def set(self, method='k_alpha_dir', nt=12, fit=False, tex=False, save=False,\
phase='', title=True, fix=0.):
self.method=method
self.nt=nt
self.fix=fix
self.fit=fit
self.tex=tex
self.save=save
self.phase=phase
self.title=title
def info(self):
print("\nMethods currently implemented\n")
print("k_alpha_dir: computes alpha from the product K*alpha, through the")
print(" derivative of P with respect to T, at constant V")
print(" At any T and P, K and P are directly computed from")
print(" the Helmholtz free energy function derivatives. No EoS")
print(" is involved at any step;")
print("k_alpha_eos: same as k_alpha_dir, but pressures and bulk moduli")
print(" are computed from an EoS;")
print("alpha_dir: the computation is perfomed through the derivative")
print(" of the unit cell volume with respect to V; volumes are")
print(" calculated without reference to any EoS, by the function")
print(" volume_dir.")
def compute(self, tt, pp, method='default', fix=0, prt=False):
"""
Thermal expansion at a specific temperature and pressure
Args:
tt: temperature (K)
pp: pressure (GPa)
method: 3 methods are currently implemented ('k_alpha_dir',
'k_alpha_eos' and 'alpha_dir'); default 'k_alpha_dir'
fix: relevant for method 'k_alpha_eos' (default 0., Kp not fixed)
prt: relevant for method 'k_alpha_eos'; it controls printout
(default False)
"""
if method=='default':
method=self.method
if fix==0:
fix=self.fix
if method=='k_alpha_dir':
if prt:
alpha_dir_from_dpdt(tt, pp, prt)
else:
alpha,k,vol=alpha_dir_from_dpdt(tt, pp, prt)
return alpha
elif method=='k_alpha_eos':
exit=False
if not prt:
exit=True
alpha=thermal_exp_p(tt, pp, False, exit, fix=fix)
return alpha[0]
else:
thermal_exp_p(tt, pp, plot=False, ex=exit, fix=fix)
elif method=='alpha_dir':
alpha=alpha_dir(tt,pp)
if prt:
print("Thermal expansion: %6.2e K^-1" % alpha)
else:
return alpha
else:
msg="*** Warning: method "+method+" not implemented"
print(msg)
def compute_serie(self, tmin, tmax, pressure=0, nt=0, fit='default', tex='default',\
title='default', save='default', phase='default', method='default',\
prt=True, fix=0):
"""
Thermal expansion in a T-range
Args:
tmin, tmax: minimum and maximum temperature in the range
pressure: pressure (GPa); default 0
nt: number of points in the T-range; if nt=0, the default is chosen (12)
method: one of the three methods currently implemented
fit: if True, a power series fit is performed
phase: if fit is True and a phase name is specified (label), the data
from the power series fit are loaded in the internal database
fix: relevant for the method 'k_alpha_eos'; if fix is not 0.,
Kp is fixed at the specified value
title: if True, a title of the plot is provided
tex: if tex is True, LaTeX formatting is provided
prt: relevant for the method 'k_alpha_eos'
save: if True, the plot is saved in a file
Note:
if save is True and method is 'k_alpha_eos', the name of the file
where the plot is saved is controlled by the plot.name and plot.ext variables.
The file resolution is controlled by the plot.dpi variable.
The appropriate parameters can be set by the set_param method
of the plot instance of the plot_class class (in the plot.py module)
Example:
>>> plot.set_param(dpi=200, name='alpha_k_eos_serie')
>>> thermal_expansion.compute_serie(100, 500, method='k_alpha_eos', save=True)
"""
if nt==0:
nt=self.nt
if fit=='default':
fit=self.fit
if tex=='default':
tex=self.tex
if title=='default':
title=self.title
if save=='default':
save=self.save
if phase=='default':
phase=self.phase
if method=='default':
method=self.method
t_list=np.linspace(tmin, tmax, nt)
t_plot=np.linspace(tmin, tmax, nt*10)
if method=='k_alpha_dir':
if fit and phase == '':
alpha_fit=alpha_dir_from_dpdt_serie(tmin, tmax, nt, pressure, fit, phase, save,\
title, tex)
return alpha_fit
else:
alpha_dir_from_dpdt_serie(tmin, tmax, nt, pressure, fit, phase, save,\
title, tex)
elif method=='alpha_dir':
if not fit:
alpha_dir_serie(tmin, tmax, nt, pressure, fit, prt=prt)
else:
alpha_fit=alpha_dir_serie(tmin, tmax, nt, pressure, fit, prt=prt)
if phase != '':
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
print("")
else:
return alpha_fit
elif method=='k_alpha_eos':
alpha_list=np.array([])
for it in t_list:
ia=self.compute(it, pressure, method='k_alpha_eos', fix=fix)
alpha_list=np.append(alpha_list, ia)
if fit:
if not flag_alpha:
print("\nWarning: no polynomial defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
if fit:
alpha_fit_plot=alpha_dir_fun(t_plot,*alpha_fit)
tit=''
if tex and title:
tit=r'Thermal expansion (method k\_alpha\_eos)'
elif title:
tit='Thermal expansion (method k_alpha_eos)'
if fit:
x=[t_list, t_plot]
y=[alpha_list, alpha_fit_plot]
style=['k*', 'k-']
lab=['Actual values', 'Power series fit']
if tex:
plot.multi(x,y,style, lab, xlab='Temperature (K)',\
ylab=r'$\alpha$ (K$^{-1}$)', title=tit, tex=True, save=save)
else:
plot.multi(x,y,style, lab, xlab='Temperature (K)',\
title=tit, ylab='Alpha (K$^{-1}$)', save=save)
else:
if tex:
plot.simple(t_list, alpha_list, xlab='Temperature (K)',\
ylab=r'$\alpha$ (K$^{-1}$)', title=tit, tex=True, save=save)
else:
plot.simple(t_list, alpha_list, xlab='Temperature (K)',\
title=tit, ylab='Alpha (K$^{-1}$)', save=save)
if fit:
if phase != '':
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
print("")
else:
return alpha_fit
else:
msg="*** Warning: method "+method+" not implemented"
print(msg)
# reads in data file. It requires a pathname to the folder
# containing data
def read_file(data_path):
global volume, energy, deg, data_vol_freq, num_set_freq
global num_mode, ini, int_set, int_mode, data_vol_freq_orig
global temperature_list, pcov, data_freq, path, data_file
global data, zu, apfu, power, lpow, power_a, lpow_a, mass
global flag_eos, flag_cp, flag_alpha, flag_err, flag_exp, flag_mass
global data_cp_exp, data_p_file, static_e0
flag_eos=False
flag_cp=False
flag_alpha=False
flag_err=False
flag_exp=False
flag_fit=False
flag_mass=False
flag_super=False
flag_static, flag_volume, flag_freq, flag_ini, flag_fu, flag_set, flag_p_static\
= False, False, False, False, False, False, False
path=data_path
input_file=data_path+'/'+'input.txt'
line_limit=100
with open(input_file) as fi:
jc=0
l0=['']
while (l0 !='END') and (jc < line_limit):
rline=fi.readline()
lstr=rline.split()
l0=''
if lstr !=[]:
l0=lstr[0].rstrip()
if l0 !='#':
if l0=='STATIC':
data_file=data_path+'/'+fi.readline()
data_file=data_file.rstrip()
flag_static=os.path.isfile(data_file)
elif l0=='PSTATIC':
data_p_file=data_path+'/'+fi.readline()
data_p_file=data_p_file.rstrip()
static_e0=fi.readline().rstrip()
flag_p_static=os.path.isfile(data_p_file)
print("\n*** INFO *** P/V static data found: use p_static")
print(" function to get a BM3-EoS")
elif l0=='VOLUME':
data_file_vol_freq=data_path+'/'+fi.readline()
data_file_vol_freq=data_file_vol_freq.rstrip()
flag_volume=os.path.isfile(data_file_vol_freq)
elif l0=='FREQ':
data_file_freq=data_path+'/'+fi.readline()
data_file_freq=data_file_freq.rstrip()
flag_freq=os.path.isfile(data_file_freq)
elif l0=='EXP':
data_file_exp=data_path+'/'+fi.readline()
data_file_exp=data_file_exp.rstrip()
flag_exp=os.path.isfile(data_file_exp)
elif l0=='LO':
lo_freq_file=data_path+'/'+fi.readline()
lo_freq_file=lo_freq_file.rstrip()
lo.flag=True
elif l0=='FITVOL':
fit_type=fi.readline()
fit_vol=fi.readline()
flag_fit=True
elif l0=='FU':
zu=fi.readline()
flag_fu=True
elif l0=='MASS':
mass=fi.readline()
flag_mass=True
elif l0=='SET':
istr=fi.readline()
while istr.split()[0] =='#':
istr=fi.readline()
int_set=istr
flag_set=True
elif l0=='TEMP':
temperature_list=fi.readline()
flag_eos=True
elif l0=='TITLE':
title=fi.readline().rstrip()
info.title=title
elif l0=='INI':
ini=fi.readline()
flag_ini=True
elif l0=='CP':
power=fi.readline()
flag_cp=True
elif l0=='ALPHA':
power_a=fi.readline()
flag_alpha=True
elif l0=='EXCLUDE':
exclude.restore()
ex_mode=fi.readline()
ex_mode=list(map(int, ex_mode.split()))
exclude.add(ex_mode)
elif l0=='KIEFFER':
kieffer.input=True
kieffer.flag=True
kief_freq=fi.readline()
kief_freq_inp=list(map(float, kief_freq.split()))
kief_freq=np.array(kief_freq_inp)*csl*h/kb
kieffer.kief_freq=kief_freq
kieffer.kief_freq_inp=kief_freq_inp
elif l0=='ANH':
anharm.nmode=int(fi.readline().rstrip())
anharm.mode=np.array([],dtype=int)
anharm.wgt=np.array([],dtype=int)
anharm.brill=np.array([],dtype=int)
for im in np.arange(anharm.nmode):
line=fi.readline().rstrip()
mw=list(map(int, line.split()))
mode=int(mw[0])
brill=int(mw[1])
wgt=int(mw[2])
anharm.mode=np.append(anharm.mode, mode)
anharm.wgt=np.append(anharm.wgt, wgt)
anharm.brill=np.append(anharm.brill, brill)
anharm.flag=True
elif l0=='SUPER':
line=fi.readline().rstrip()
line_val=list(map(int, line.split()))
snum=line_val[0]
static_vol=line_val[1]
flag_static_vol=False
if static_vol == 0:
flag_static_vol=True
flag_super=True
elif l0=='DISP':
disp.input_flag=True
disp.flag=True
disp.input=True
disp_file=data_path+'/'+fi.readline()
disp_info=data_path+'/'+fi.readline()
disp_file=disp_file.rstrip()
disp_info=disp_info.rstrip()
fd=open(disp_info)
line=fd.readline().rstrip().split()
disp.molt=int(line[0])
disp.fit_degree=int(line[1])
disp.fit_type=float(line[2])
disp.temp=float(line[3])
line=fd.readline().rstrip().split()
disp.numf=list(map(int, line))
line=fd.readline().rstrip().split()
disp.wgh=list(map(int, line))
line=fd.readline().rstrip().split()
disp.vol=list(map(float, line))
fd.close()
w_list=np.array([],dtype=int)
for iw in np.arange(disp.molt):
wl=np.repeat(disp.wgh[iw],disp.numf[iw])
w_list=np.append(w_list,wl)
disp.w_list=w_list
disp.f_size=disp.w_list.size
jc=jc+1
if jc>=line_limit:
print("\nWarning: END keyword not found")
if not flag_volume or not flag_freq or not (flag_static or flag_p_static):
print("\nError: one or more data file not found, or not assigned"
" in input")
flag_err=True
return
if not flag_fu:
print("\nError: mandatory FU keyword not found")
flag_err=True
return
if not flag_set:
print("\nError: mandatory SET keyword not found")
flag_err=True
return
fi.close()
if flag_view_input.value:
view_input(input_file)
print("\n-------- End of input file -------\n")
flag_view_input.off()
int_set=int_set.rstrip()
int_set=list(map(int, int_set.split()))
info.freq_sets=int_set
if flag_eos:
temperature_list=temperature_list.rstrip()
temperature_list=list(map(float,temperature_list.split()))
if flag_ini:
ini=ini.rstrip()
ini=list(map(float, ini.split()))
ini[1]=ini[1]*1e-21/conv
zus=list(map(int,zu.rstrip().split()))
zu=zus[0]
apfu=zus[1]
if flag_fit:
fit_type=fit_type.rstrip()
fit_vol=fit_vol.rstrip()
fit_vol=list(map(float, fit_vol.split()))
v_ini=fit_vol[0]
v_fin=fit_vol[1]
nv=int(fit_vol[2])
if fit_type=='SPLINE':
flag_spline.on()
flag_spline.set_degree(fit_vol[3])
flag_spline.set_smooth(fit_vol[4])
flag_spline.vol_range(v_ini, v_fin, nv)
info.fit_type='spline'
info.fit_degree=flag_spline.degree
info.fit_smooth=flag_spline.smooth
info.min_vol_fit=v_ini
info.max_vol_fit=v_fin
info.fit_points=nv
elif fit_type=='POLY':
flag_poly.on()
flag_poly.set_degree(fit_vol[3])
flag_poly.vol_range(v_ini, v_fin, nv)
info.fit_type='poly'
info.fit_degree=flag_poly.degree
info.min_vol_fit=v_ini
info.max_vol_fit=v_fin
info.fit_points=nv
if flag_super:
supercell.set(snum)
if flag_cp:
power=power.rstrip()
power=list(map(float, power.split()))
lpow=len(power)
test_cp=[ipw in cp_power_list for ipw in power]
if not all(test_cp):
print("WARNING: the power list for the Cp fit is not consistent")
print(" with the Perplex database")
print("Allowed powers:", cp_power_list)
print("Given powers:", power)
print("")
if flag_alpha:
power_a=power_a.rstrip()
power_a=list(map(float, power_a.split()))
lpow_a=len(power_a)
test_al=[ipw in al_power_list for ipw in power_a]
if not all(test_al):
print("WARNING: the power list for the alpha fit is not consistent")
print(" with the Perplex database")
print("Allowed powers:", al_power_list)
print("Given powers:", power_a)
print("")
if flag_mass:
mass=float(mass.rstrip())
b_flag=False
if anharm.flag:
anharm_setup()
for im,ib in zip(anharm.mode, anharm.brill):
if ib == 0:
exclude.add([im])
else:
disp.free_exclude([im])
b_flag=True
if disp.flag:
disp.freq=np.array([])
disp_data=np.loadtxt(disp_file)
disp.deg=disp_data[:,0]
nset=len(disp.vol)
disp.nset=nset
for iv in np.arange(nset):
disp.freq=np.append(disp.freq, disp_data[:,iv+1])
disp.freq=disp.freq.reshape(nset,disp.f_size)
if disp.fit_type == 0:
disp.freq_fit()
else:
disp.freq_spline_fit()
disp.free_fit(disp.temp,disp.vol[0])
data=np.loadtxt(data_file)
if flag_p_static:
static_e0=float(static_e0)
data_vol_freq_orig=np.loadtxt(data_file_vol_freq)
lo.data_freq=np.loadtxt(data_file_freq)
lo.data_freq_orig=np.copy(lo.data_freq)
info.min_freq_vol=min(data_vol_freq_orig)
info.max_freq_vol=max(data_vol_freq_orig)
info.freq_points=len(data_vol_freq_orig)
if flag_exp:
data_cp_exp=np.loadtxt(data_file_exp)
volume=data[:,0]
energy=data[:,1]
if flag_super:
if flag_static_vol:
volume=volume*snum
energy=energy*snum
info.min_static_vol=min(volume)
info.max_static_vol=max(volume)
info.static_points=len(volume)
deg=lo.data_freq[:,0]
num_set_freq=lo.data_freq.shape[1]-1
num_mode=lo.data_freq.shape[0]-1
int_mode=np.arange(num_mode+1)
if flag_super:
deg=deg/supercell.number
if not flag_ini:
ini=init_bm3(volume,energy)
data_vol_freq=[]
for iv in int_set:
data_vol_freq=np.append(data_vol_freq, data_vol_freq_orig[iv])
int_set_new=np.array([],dtype='int32')
ind=data_vol_freq.argsort()
for ind_i in ind:
int_set_new=np.append(int_set_new, int_set[ind_i])
if not np.array_equal(int_set, int_set_new):
print("\nWarning ** Volume and frequencies lists have been sorted")
print(" indexing: ", ind)
print("")
int_set=int_set_new
data_vol_freq.sort()
info.min_select_vol=min(data_vol_freq)
info.max_select_vol=max(data_vol_freq)
info.select_points=len(data_vol_freq)
volume_ctrl.set_all()
if flag_fit:
if flag_spline.flag:
flag_spline.stack()
elif flag_poly.flag:
flag_poly.stack()
if lo.flag:
lo_data=np.loadtxt(lo_freq_file)
lo.mode=lo_data[:,0].astype(int)
lo.split=lo_data[:,1].astype(float)
lo.on()
if disp.input and kieffer.input:
kieffer.flag=False
print("\nBoth Kieffer and phonon dispersion data were found in the input file")
print("The Kieffer model is therefore deactivated")
if b_flag:
print("")
disp.free_fit_vt()
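# Minimal sketch of an input.txt file parsed by read_file (file names and numbers
# are hypothetical; STATIC or PSTATIC, VOLUME, FREQ, FU and SET are mandatory,
# and the file must be closed by the END keyword):
#   STATIC
#   static_energies.dat
#   VOLUME
#   volumes.dat
#   FREQ
#   frequencies.dat
#   FU
#   4 10
#   SET
#   0 1 2 3
#   TEMP
#   100. 300. 500.
#   END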
def view():
"""
View input file (input.txt)
"""
input_file=path+"/input.txt"
view_input(input_file)
def view_input(input_file):
line_limit=1000
print("\nInput file\n")
with open(input_file) as fi:
jc=0
l0=['']
while (l0 !='END') and (jc < line_limit):
rline=fi.readline()
lstr=rline.split()
if lstr !=[]:
l0=lstr[0].rstrip()
if l0 !='#':
print(rline.rstrip())
jc=jc+1
def reload_input(path):
reset_flag()
read_file(path)
static()
def load_disp(disp_info, disp_file):
"""
Load files containing data for the phonon dispersion correction. These
are the same files that could be also specified under the keyword DISP
in the input.txt file.
Args:
disp_info: name of the info file
disp_file: name of the frequencies' file
"""
disp.input_flag=True
disp.flag=True
disp.input=True
disp_file=path_orig+'/'+disp_file
disp_info=path_orig+'/'+disp_info
fd=open(disp_info)
line=fd.readline().rstrip().split()
disp.molt=int(line[0])
disp.fit_degree=int(line[1])
disp.fit_type=float(line[2])
disp.temp=float(line[3])
line=fd.readline().rstrip().split()
disp.numf=list(map(int, line))
line=fd.readline().rstrip().split()
disp.wgh=list(map(int, line))
line=fd.readline().rstrip().split()
disp.vol=list(map(float, line))
fd.close()
disp.error_flag=False
if len(disp.vol) == 1:
disp.error_flag=True
w_list=np.array([],dtype=int)
for iw in np.arange(disp.molt):
wl=np.repeat(disp.wgh[iw],disp.numf[iw])
w_list=np.append(w_list,wl)
disp.w_list=w_list
disp.f_size=disp.w_list.size
disp.freq=np.array([])
disp_data=np.loadtxt(disp_file)
disp.deg=disp_data[:,0]
nset=len(disp.vol)
disp.nset=nset
for iv in np.arange(nset):
disp.freq=np.append(disp.freq, disp_data[:,iv+1])
disp.freq=disp.freq.reshape(nset,disp.f_size)
if disp.fit_type == 0:
disp.freq_fit()
else:
disp.freq_spline_fit()
disp.free_fit(disp.temp,disp.vol[0])
print("Phonon dispersion data loaded from the file %s" % disp_file)
print("Info data from the file %s" % disp_info)
print("Phonon frequencies are computed at the volume(s) ", disp.vol)
print("\nUse disp.free_fit_ctrl to get free energy surfaces F(T) or F(V,T)")
def set_fix(fix=4.):
"""
Sets Kp to a value and keeps it fixed during fitting of EoS
Args:
fix (optional): Kp value. Default 4.
if fix=0, Kp is fixed to the last computed value stored in info.kp
The flag f_fix.flag is set to True
"""
if fix == 0:
fix=info.kp
f_fix.on(fix)
def reset_fix():
"""
Resets the fix Kp option: f_fix.flag=False
"""
f_fix.off()
def fix_status():
"""
Inquires about the setting concerning Kp
"""
print("Fix status: %r" % f_fix.flag)
if f_fix.flag:
print("Kp fixed at %4.2f" % f_fix.value )
def set_spline(degree=3,smooth=5, npoint=16):
"""
Sets spline fits of the frequencies as function of volume
Args:
degree (optional): degree of the spline (default: 3)
smooth (optional): smoothness of the spline (default: 5)
npoint (optional): number of points of the spline function
(default: 16)
"""
dv=0.2
flag_spline.on()
flag_poly.off()
flag_spline.set_degree(degree)
flag_spline.set_smooth(smooth)
fit_vol_exists=True
try:
flag_spline.fit_vol
except AttributeError:
fit_vol_exists=False
if not fit_vol_exists:
set_volume_range(min(data_vol_freq)-dv,max(data_vol_freq)+dv,npoint,\
prt=True)
else:
set_volume_range(min(flag_spline.fit_vol),max(flag_spline.fit_vol),npoint)
flag_spline.stack()
info.fit_type='spline'
info.fit_degree=degree
info.fit_smooth=smooth
info.fit_points=npoint
info.min_vol_fit=min(flag_spline.fit_vol)
info.max_vol_fit=max(flag_spline.fit_vol)
def set_poly(degree=4,npoint=16):
"""
Sets polynomial fits of the frequencies as function of volume
Args:
degree (optional): degree of the polynomial (default: 4)
npoint (optional): number of points of the polynomial function
(default: 16)
"""
dv=0.2
flag_poly.on()
flag_spline.off()
flag_poly.set_degree(degree)
fit_vol_exists=True
try:
flag_poly.fit_vol
except AttributeError:
fit_vol_exists=False
if not fit_vol_exists:
set_volume_range(min(data_vol_freq)-dv,max(data_vol_freq)+dv,npoint, \
prt=True)
else:
set_volume_range(min(flag_poly.fit_vol),max(flag_poly.fit_vol),npoint)
flag_poly.stack()
info.fit_type='poly'
info.fit_degree=degree
info.fit_points=npoint
info.min_vol_fit=min(flag_poly.fit_vol)
info.max_vol_fit=max(flag_poly.fit_vol)
def set_volume_range(vini,vfin,npoint=16,prt=False):
"""
Defines a volume range for the fitting of frequencies and EoS
in the case that SPLINE or POLY fits have been chosen
Args:
vini: minimum volume
vfin: maximum volume
npoint (optional): number of points in the volume range
"""
if flag_poly.flag:
flag_poly.vol_range(vini,vfin,npoint)
flag_poly.stack()
info.fit_points=npoint
info.min_vol_fit=min(flag_poly.fit_vol)
info.max_vol_fit=max(flag_poly.fit_vol)
if prt:
print("Volume range %8.4f - %8.4f defined for 'POLY' fit" %\
(vini, vfin))
elif flag_spline.flag:
flag_spline.vol_range(vini,vfin,npoint)
flag_spline.stack()
info.fit_points=npoint
info.min_vol_fit=min(flag_spline.fit_vol)
info.max_vol_fit=max(flag_spline.fit_vol)
if prt:
print("Volume range %8.4f - %8.4f defined for 'SPLINE' fit" %\
(vini, vfin))
else:
print("No fit of frequencies active\nUse set_poly or set_spline\n")
def fit_status():
if flag_poly.flag or flag_spline.flag:
print("Fit of frequencies is active")
if flag_spline.flag:
print("Spline fit: degree %2d, smooth: %3.1f" \
% (flag_spline.degree, flag_spline.smooth))
print("Volume range: %5.2f - %5.2f, points=%d" % \
(min(flag_spline.fit_vol), max(flag_spline.fit_vol), \
flag_spline.fit_vol.size))
else:
print("Polynomial fit: degree %2d" % flag_poly.degree)
print("Volume range: %5.2f - %5.2f, points=%d" % \
(min(flag_poly.fit_vol), max(flag_poly.fit_vol), \
flag_poly.fit_vol.size))
else:
print("Fitting is off")
def fit_off():
flag_poly.off()
flag_spline.off()
info.fit_type='No fit'
def quick_start(path):
"""
Quick start of the program.
Reads the input files found under the folder 'path'
whose name is written in the 'quick_start.txt' file
(found in the master folder).
Executes read_file and static (static equation of state),
and stacks data for the application of the Kieffer model,
if required by the optional 'KIEFFER' keyword in input.txt
"""
read_file(path)
static(plot=False)
if kieffer.flag:
free_stack_t(pr.kt_init, pr.kt_fin, pr.kt_points)
if verbose.flag:
print("Results from the Kieffer model for acoustic branches:")
print("plot of the Helmholtz free energy as a function of T.")
print("Temperature limits and number of points defined in parame.py")
kieffer.plot()
else:
print("Kieffer model for the acoustic branches activated")
def v_bm3(vv,v0,k0,kp,c):
"""
Volume integrated Birch-Murnaghan equation (3^rd order)
Args:
vv: volume
v0: volume at the minimum of the energy
k0: bulk modulus
kp: derivative of k0 with respect to P
c: energy at the minimum
Returns:
the energy at the volume vv
"""
v0v=(np.abs(v0/vv))**(2/3)
f1=kp*(np.power((v0v-1.),3))
f2=np.power((v0v-1.),2)
f3=6.-4*v0v
return c+(9.*v0*k0/16.)*(f1+f2*f3)
def bm3(vv,v0,k0,kp):
"""
Birch-Murnaghan equation (3^rd order)
Args:
vv: volume
v0: volume at the minimum of the energy
k0: bulk modulus
kp: derivative of k0 with respect to P
Returns:
the pressure at the volume vv
"""
v0v7=np.abs((v0/vv))**(7/3)
v0v5=np.abs((v0/vv))**(5/3)
v0v2=np.abs((v0/vv))**(2/3)
f1=v0v7-v0v5
f2=(3/4)*(kp-4)*(v0v2-1)
return (3*k0/2)*f1*(1+f2)
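# Numerical sanity check for bm3 (illustrative numbers, working directly in GPa):
# with v0=120 A^3, k0=100 GPa and kp=4, the pressure at vv=v0 is 0; compressing
# to vv=0.98*v0 gives P = (3*k0/2)*((v0/vv)**(7/3)-(v0/vv)**(5/3)) ~ 2.1 GPa,
# since the (kp-4) term vanishes for kp=4.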
def bmx_tem(tt,**kwargs):
"""
V-BMx (volume integrated) fit at the selected temperature
Args:
tt: temperature
Keyword Args:
fix: if fix > 0.1, kp is fixed to the value 'fix'
during the optimization of the EoS.
(this is a valid option only for the BM3 fit,
but it is ignored for a BM4 EoS)
Returns:
1. free energy values at the volumes used for the fit
2. optimized v0, k0, kp, (kpp), and c
3. covariance matrix
Note:
bmx_tem optimizes the EoS according to several
possible options specified elsewhere:
1. kp fixed or free
2. frequencies not fitted, or fitted by
polynomials or splines
3. 3^rd or 4^th order BM EoS
Note:
bmx_tem includes energy contributions from static and vibrational
optical modes; acoustic contributions from the modified Kieffer
model are included, provided the KIEFFER keyword is in the input
file; contributions from anharmonic modes are included, provided
the ANH keyword is in the input file. NO dispersion correction
is included (even if the DISP keyword is provided).
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
flag_x=False
volb=data_vol_freq
if flag_poly.flag:
volb=flag_poly.fit_vol
elif flag_spline.flag:
volb=flag_spline.fit_vol
if f_fix.flag:
fix=f_fix.value
flag_x=True
p0_f=[ini[0],ini[1],ini[3]]
if fixpar:
if fix_value < 0.1:
flag_x=False
else:
fix=fix_value
flag_x=True
p0_f=[ini[0],ini[1],ini[3]]
if flag_poly.flag or flag_spline.flag:
free_energy=free_fit(tt)
else:
free_energy=free(tt)
if (flag_x) and (not bm4.flag):
pterm, pcov_term = curve_fit(lambda volb, v0, k0, c: \
v_bm3(volb, v0, k0, fix, c), \
volb, free_energy, p0=p0_f, \
ftol=1e-15, xtol=1e-15)
pterm=np.append(pterm,pterm[2])
pterm[2]=fix
else:
if bm4.flag:
if f_fix.flag:
reset_fix()
fix_status()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pterm, pcov_term= curve_fit(bm4.energy, volb, free_energy,\
method='dogbox',p0=bm4.en_ini, ftol=1e-18, xtol=3.e-16,gtol=1e-18)
bm4.store(pterm)
else:
pterm, pcov_term = curve_fit(v_bm3, volb, free_energy, \
p0=ini, ftol=1e-15, xtol=1e-15)
return [free_energy, pterm, pcov_term]
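# Usage sketch for bmx_tem (temperature in K; requires the data loaded by read_file):
#   >>> f_en, eos_par, eos_cov = bmx_tem(300.)          # Kp refined
#   >>> f_en, eos_par, eos_cov = bmx_tem(300., fix=4.)  # Kp kept fixed at 4
#   # eos_par contains V0, K0 (a.u.), Kp (and Kpp for BM4) and the energy constant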
def bulk_conversion(kk):
"""
Bulk modulus unit conversion (from atomic units to GPa)
"""
kc=kk*conv/1e-21
print("Bulk modulus: %8.4e a.u. = %6.2f GPa" % (kk, kc))
def stop():
"""
used to exit from the program in case of fatal exceptions
"""
while True:
print("Program will be terminated due to errors in processing data")
answ=input('Press enter to quit')
sys.exit(1)
def bm4_def():
V0=sym.Symbol('V0',real=True,positive=True)
V=sym.Symbol('V',real=True,positive=True)
f=sym.Symbol('f',real=True)
kp=sym.Symbol('kp',real=True)
ks=sym.Symbol('ks',real=True)
k0=sym.Symbol('k0',real=True)
P=sym.Symbol('P',real=True,positive=True)
E0=sym.Symbol('E0',real=True)
c=sym.Symbol('c',real=True)
f=((V0/V)**sym.Rational(2,3)-1)/2
P=3*k0*f*((1+2*f)**sym.Rational(5,2))*(1+sym.Rational(3,2)*(kp-4.)*f +\
sym.Rational(3,2)*(k0*ks+(kp-4.)*(kp-3.)+sym.Rational(35,9))*(f**2))
E=sym.integrate(P,V)
E0=E.subs(V,V0)
E=E0-E+c
bm4_energy=sym.lambdify((V,V0,k0,kp,ks,c),E,'numpy')
bm4_pressure=sym.lambdify((V,V0,k0,kp,ks),P,'numpy')
return bm4_energy, bm4_pressure
def init_bm4(vv,en,kp):
"""
Function used to estimate the initial parameters of a V-integrated BM4
EoS. The function is used by the method "estimates" of the bm4 class.
The estimation is done on the basis of a previous BM3 optimization
whose initial parameters are provided by the current function.
Args:
vv (list): volumes
en (list): static energies at the corresponding volumes vv
kp: initial value assigned to kp
Returns:
"ini" list of V-integrated EoS parameters (for a BM3) estimated by a
polynomial fit: v_ini, k0_ini, kp, e0_ini.
Note: such parameters are used as initial guesses for the BM3 optimization
performed by the method "estimates" of the class bm4 that, in turn,
outputs the "ini" list for the BM4 EoS optimization.
"""
pol=np.polyfit(vv,en,4)
pder1=np.polyder(pol,1)
pder2=np.polyder(pol,2)
v_r=np.roots(pder1)
vs=v_r*np.conj(v_r)
min_r=np.argmin(vs)
v_ini=np.real(v_r[min_r])
e0_ini=np.polyval(pol, v_ini)
k0_ini=np.polyval(pder2, v_ini)
k0_ini=k0_ini*v_ini
ini=[v_ini, k0_ini, kp, e0_ini]
return ini
def init_bm3(vv,en):
"""
Estimates initial parameters for the V-integrated BM3 EoS in case
the INI keyword is not present in "input.txt"
Args:
vv (list): volumes
en (list): static energies at the corresponding volumes vv
Returns:
"ini" list of V-integrated EoS parameters estimated by a
polynomial fit: v_ini, k0_ini, kp, e0_ini. kp is set to 4.
Note:
such parameters are used as initial guesses for the bm3 optimization.
"""
kp_ini=4.
pol=np.polyfit(vv,en,3)
pder1=np.polyder(pol,1)
pder2=np.polyder(pol,2)
v_r=np.roots(pder1)
vs=v_r*np.conj(v_r)
min_r=np.argmin(vs)
v_ini=np.real(v_r[min_r])
e0_ini=np.polyval(pol, v_ini)
k0_ini=np.polyval(pder2, v_ini)
k0_ini=k0_ini*v_ini
ini=[v_ini, k0_ini, kp_ini, e0_ini]
return ini
# Output the pressure at a given temperature (tt) and volume (vv).
# Kp can be kept fixed (by setting fix=Kp > 0.1)
def pressure(tt,vv,**kwargs):
"""
Computes the pressure at a temperature and volume
Args:
tt: temperature
vv: unit cell volume
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1
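Example:
Illustrative call (T and V values are placeholders; it assumes the
input data have already been loaded); Kp is kept fixed at 4:
>>> pressure(300., 120., fix=4.)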
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
[ff,veos,err]=bmx_tem(tt,fix=fix_value)
else:
[ff,veos,err]=bmx_tem(tt)
if bm4.flag:
eos=veos[0:4]
return round(bm4.pressure(vv,*eos)*conv/1e-21,3)
else:
eos=veos[0:3]
return round(bm3(vv,*eos)*conv/1e-21,3)
def pressure_dir(tt,vv):
"""
Computes the pressure at a given volume and temperature from
the numerical derivative of the Helmholtz free energy with
respect to the volume (at constant temperature).
Args:
tt: temperature (K)
vv: volume (A^3)
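Example:
Illustrative call (T and V values are placeholders; a frequency fit
activated by the FITVOL keyword is assumed):
>>> pressure_dir(300., 120.)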
"""
deg=pr.degree_v
if not vd.flag:
vmin=vv-pr.delta_v/2.
vmax=vv+pr.delta_v/2.
else:
vmin=vv-vd.delta/2.
vmax=vv+vd.delta/2.
v_range=np.linspace(vmin,vmax,pr.nump_v)
f_list=np.array([])
for iv in v_range:
fi=free_fit_vt(tt,iv)
f_list=np.append(f_list,fi)
vfit=np.polyfit(v_range,f_list,deg)
vfitder=np.polyder(vfit,1)
press=-1*np.polyval(vfitder,vv)
return press*conv/1e-21
def volume_dir(tt,pp,alpha_flag_1=False, alpha_flag_2=False):
"""
Computes the equilibrium volume at a given temperature and pressure
without using an equation of state.
An initial estimation of the volume is however obtained by using
a BM3 EoS, by calling the eos_temp function; such volume is stored
in the v_new variable.
A list of volumes around the v_new value is then built and, for each
value in the list, a pressure is computed by using the pressure_dir
function, and compared to the input pressure to find the volume
at which the two pressures are equal.
A number of parameters are used to control the computation. They are
all defined by the volume-control driver (volume_ctrl). Convenient
values are already set by default, but they can be changed by using
the method volume_ctrl.set_all. Use the info.show method to get such
values under the 'volume driver section'.
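Example:
Illustrative call (T in K and P in GPa are placeholder values; input
data and a polynomial fit of the frequencies are assumed):
>>> volume_dir(300., 1.)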
"""
vol_opt.on()
if volume_ctrl.kp_fix:
reset_fix()
if tt < volume_ctrl.t_max:
eos_temp(tt,kp_only=True)
else:
eos_temp(volume_ctrl.t_max,kp_only=True)
set_fix(0)
if (alpha_flag_1) and (not alpha_flag_2):
reset_fix()
eos_temp(tt,kp_only=True)
set_fix(0)
vini=new_volume(tt,pp)
v_new=vini[0] # Initial volume from EoS
if volume_ctrl.t_last_flag:
vini=volume_ctrl.v_last
if (tt > volume_ctrl.t_last) & (volume_ctrl.t_last > 10.):
volume_ctrl.t_last_flag=True
volume_ctrl.shift=0.
volume_ctrl.upgrade_shift=False
if not flag_poly.flag:
if flag_fit_warning.value:
print("It is advised to use polynomial fits for 'dir' calculations\n")
fit_status()
print("")
flag_fit_warning.value=False
if flag_poly.flag:
volume_max=max(flag_poly.fit_vol)
volume_min=min(flag_poly.fit_vol)
if flag_spline.flag:
volume_max=max(flag_spline.fit_vol)
volume_min=min(flag_spline.fit_vol)
if flag_poly.flag:
if vini > volume_max:
flag_volume_max.value=True
if flag_volume_warning.value:
flag_volume_warning.value=False
print("Warning: volume exceeds the maximum value set in volume_range")
print("Volume: %8.4f" % vini)
fit_status()
print("")
# return vini
if flag_spline.flag:
if vini > volume_max:
flag_volume_max.value=True
if flag_volume_warning.value:
flag_volume_warning.value=False
print("Warning: volume exceeds the maximum value set in volume_range")
print("Volume: %8.4f" % vini)
fit_status()
print("")
# return vini
vvi=vini
if volume_ctrl.t_last_flag:
if (tt > volume_ctrl.t_last) & (volume_ctrl.t_last > 10.):
vvi=volume_ctrl.v_last
vplot=vvi
v_list=np.linspace(vvi - volume_ctrl.delta/volume_ctrl.left,\
vvi + volume_ctrl.delta/volume_ctrl.right, 24)
else:
if tt > volume_ctrl.t_dump:
volume_ctrl.shift=volume_ctrl.shift/volume_ctrl.dump
v_list=np.linspace(vini[0]-volume_ctrl.shift - volume_ctrl.delta/volume_ctrl.left,\
vini[0]-volume_ctrl.shift + volume_ctrl.delta/volume_ctrl.right, 24)
vplot=vini[0]
p_list=np.array([])
for iv in v_list:
pi=(pressure_dir(tt,iv)-pp)**2
p_list=np.append(p_list,pi)
fitv=np.polyfit(v_list,p_list,volume_ctrl.degree)
pressure=lambda vv: np.polyval(fitv,vv)
min_p=np.argmin(p_list)
vini=[v_list[min_p]]
if volume_ctrl.degree > 2:
bound=[(volume_min, volume_max)]
vmin=minimize(pressure,vini,method='L-BFGS-B', bounds=bound, tol=1e-10,
options={'gtol':1e-10, 'maxiter':500})
shift=v_new-vmin.x[0]
else:
shrink=volume_ctrl.quad_shrink
new_v=np.linspace(vini[0]-volume_ctrl.delta/shrink, vini[0]+volume_ctrl.delta/shrink,8)
new_p=np.array([])
for iv in new_v:
pi=(pressure_dir(tt,iv)-pp)**2
new_p=np.append(new_p,pi)
fit_new=np.polyfit(new_v, new_p,2)
der_new=np.polyder(fit_new,1)
vmin=-1*der_new[1]/der_new[0]
shift=v_new-vmin
if volume_ctrl.upgrade_shift:
volume_ctrl.shift=shift
if volume_ctrl.degree > 2:
if volume_ctrl.debug:
x1=np.mean(v_list)
x2=np.min(v_list)
x=(x1+x2)/2
y=0.95*np.max(p_list)
y2=0.88*np.max(p_list)
y3=0.81*np.max(p_list)
y4=0.74*np.max(p_list)
plt.figure()
title="Temperature: "+str(round(tt,2))+" K"
plt.plot(v_list,p_list)
plt.xlabel("V (A^3)")
plt.ylabel("Delta_P^2 (GPa^2)")
plt.title(title)
v_opt="Opt volume: "+str(vmin.x[0].round(4))
v_min="Approx volume: "+str(vini[0].round(4))
v_new="EoS volume: "+str(v_new.round(4))
v_ini="V_ini volume: "+str(vplot.round(4))
plt.text(x,y,v_opt,fontfamily='monospace')
plt.text(x,y2,v_min, fontfamily='monospace')
plt.text(x,y3,v_new,fontfamily='monospace')
plt.text(x,y4,v_ini,fontfamily='monospace')
plt.show()
else:
if volume_ctrl.debug:
x1=np.mean(v_list)
x2=np.min(v_list)
x=(x1+x2)/2
y=0.95*np.max(p_list)
y2=0.88*np.max(p_list)
y3=0.81*np.max(p_list)
y4=0.74*np.max(p_list)
plt.figure()
title="Temperature: "+str(round(tt,2))+" K"
plt.plot(v_list,p_list)
plt.plot(new_v, new_p,"*")
plt.xlabel("V (A^3)")
plt.ylabel("Delta_P^2 (GPa^2)")
plt.title(title)
v_opt="Opt. volume: "+str(round(vmin,4))
v_min="Approx volume: "+str(vini[0].round(4))
v_new="EoS Volume: "+str(v_new.round(4))
v_ini="V_ini volume: "+str(vplot.round(4))
plt.text(x,y,v_opt,fontfamily='monospace')
plt.text(x,y2,v_min, fontfamily='monospace')
plt.text(x,y3,v_new,fontfamily='monospace')
plt.text(x,y4,v_ini,fontfamily='monospace')
plt.show()
if volume_ctrl.degree > 2:
test=vmin.success
if not test:
print("\n**** WARNING ****")
print("Optimization in volume_dir not converged; approx. volume returned")
print("temperature: %5.2f, Volume: %6.3f" % (tt, vini[0]))
volume_ctrl.v_last=vini[0]
vol_opt.off()
return vini[0]
else:
volume_ctrl.v_last=vini[0]
return vmin.x[0]
else:
volume_ctrl.v_last=vmin
return vmin
def volume_from_F(tt, shrink=10., npoints=60, debug=False):
"""
Computation of the equilibrium volume at any given temperature
and at 0 pressure. The algorithm looks for the minimum of the
Helmholtz function with respect to V (it is equivalent to the
minimization of the Gibbs free energy function as the pressure is
zero). The method is very similar to that implemented in the
more general volume_dir function, but it does not require the
calculation of any derivative of F (to get the pressure).
The Helmholtz free energy is computed by means of the free_fit_vt
function.
Args:
tt: temperature (in K)
npoints: number of points in the V range (centered around an
initial volume computed by the volume_dir function),
where the minimum of F is to be searched (default 60).
shrink: shrinking factor for the definition of the V-range for
the optimization of V (default 10).
debug: plots and prints debug information. If debug=False, only
the optimized value of volume is returned.
Note:
The function makes use of parameters set by the methods of
the volume_F_ctrl instance of the volume_F_control_class class.
In particular, the initial value of volume computed by the
volume_dir function can be shifted by the volume_F_ctrl.shift
value. This value is set by the volume_F_ctrl.set_shift method
provided that the volume_F_ctrl.upgrade_shift flag is True.
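Example:
Illustrative call at a placeholder temperature of 300 K, with debug
information and plots switched on:
>>> v = volume_from_F(300., debug=True)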
"""
delta=volume_ctrl.delta
d2=delta/2.
vini=volume_dir(tt,0)
if volume_F_ctrl.get_flag():
shift=volume_F_ctrl.get_shift()
vini=vini+shift
v_eos=new_volume(tt,0)[0]
vlist=np.linspace(vini-d2, vini+d2, npoints)
flist=list(free_fit_vt(tt, iv) for iv in vlist)
imin=np.argmin(flist)
vmin=vlist[imin]
vlist2=np.linspace(vmin-d2/shrink, vmin+d2/shrink, 8)
flist2=list(free_fit_vt(tt, iv) for iv in vlist2)
fit=np.polyfit(vlist2,flist2,2)
fitder=np.polyder(fit,1)
vref=-fitder[1]/fitder[0]
fref=np.polyval(fit, vref)
v_shift=vref-vini
if volume_F_ctrl.get_flag() & volume_F_ctrl.get_upgrade_status():
volume_F_ctrl.set_shift(v_shift)
vplot=np.linspace(vref-d2/shrink, vref+d2/shrink, npoints)
fplot=np.polyval(fit, vplot)
if debug:
xt=vlist2.round(2)
title="F free energy vs V at T = "+str(tt)+" K"
plt.figure()
ax=plt.gca()
ax.ticklabel_format(useOffset=False)
plt.plot(vlist2, flist2, "k*", label="Actual values")
plt.plot(vplot, fplot, "k-", label="Quadratic fit")
plt.plot(vref,fref,"r*", label="Minimum from fit")
plt.legend(frameon=False)
plt.xlabel("Volume (A^3)")
plt.ylabel("F (a.u.)")
plt.xticks(xt)
plt.title(title)
plt.show()
print("\nInitial volume from volume_dir: %8.4f" % vini)
print("Volume from EoS fit: %8.4f" % v_eos)
print("Approx. volume at minimum F (numerical): %8.4f" % vmin)
print("Volume at minimum (from fit): %8.4f\n" % vref)
return vref
else:
return vref
def volume_from_F_serie(tmin, tmax, npoints, fact_plot=10, debug=False, expansion=False, degree=4,
fit_alpha=False, export=False, export_alpha=False, export_alpha_fit=False):
"""
Volume and thermal expansion (at zero pressure) in a range of temperatures,
computed by the minimization of the Helmholtz free energy function.
Args:
tmin, tmax, npoints: minimum, maximum and number of points defining
the T range
fact_plot: factor used to compute the number of points for the plot
(default 10)
debug: debugging information (default False)
expansion: computation of thermal expansion (default False)
degree: if expansion=True, in order to compute the thermal expansion
a log(V) vs T polynomial fit of degree 'degree' is performed
(default 4)
fit_alpha: thermal expansion is fitted to a power series (default False)
export: the list of computed volumes is exported (default False)
export_alpha_fit: coefficients of the power series fitting the alpha's
are exported
Note:
Thermal expansion is computed from a log(V) versus T polynomial fit
Note:
if export is True, the volume list only is exported (and the function
returns) no matter if expansion is also True (that is, thermal expansion
is not computed). Likewise, if export_alpha is True, no fit of the thermal
expansion data on a power series is performed (and, therefore, such data from
the fit cannot be exported).
Note:
Having exported the coefficients of the power series fitting the alpha values,
they can be uploaded to a particular phase by using the load_alpha method
of the mineral class; e.g. py.load_alpha(alpha_fit, power_a)
Examples:
>>> alpha_fit=volume_from_F_serie(100, 400, 12, expansion=True, fit_alpha=True, export_alpha_fit=True)
>>> py.load_alpha(alpha_fit, power_a)
>>> py.info()
"""
t_list=np.linspace(tmin, tmax, npoints)
v_list=list(volume_from_F(it, debug=debug) for it in t_list)
if export:
return v_list
plt.figure()
plt.plot(t_list, v_list, "k-")
plt.xlabel("T (K)")
plt.ylabel("V (A^3)")
plt.title("Volume vs Temperature at zero pressure")
plt.show()
if expansion:
logv=np.log(v_list)
fit=np.polyfit(t_list, logv, degree)
fitder=np.polyder(fit, 1)
alpha_list=np.polyval(fitder, t_list)
if export_alpha:
return alpha_list
t_plot=np.linspace(tmin, tmax, npoints*fact_plot)
lv_plot=np.polyval(fit, t_plot)
label_fit="Polynomial fit, degree: "+str(degree)
plt.figure()
plt.title("Log(V) versus T")
plt.xlabel("T (K)")
plt.ylabel("Log(V)")
plt.plot(t_list, logv, "k*", label="Actual values")
plt.plot(t_plot, lv_plot, "k-", label=label_fit)
plt.legend(frameon=False)
plt.show()
plt.figure()
plt.title("Thermal expansion")
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.plot(t_list, alpha_list, "k*", label="Actual values")
if fit_alpha:
if not flag_alpha:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
else:
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
alpha_value=[]
for ict in t_plot:
alpha_i=alpha_dir_fun(ict,*alpha_fit)
alpha_value=np.append(alpha_value,alpha_i)
plt.plot(t_plot,alpha_value,"k-", label="Power serie fit")
plt.legend(frameon=False)
plt.show()
if export_alpha_fit & flag_alpha & fit_alpha:
return alpha_fit
def volume_conversion(vv, atojb=True):
"""
Volume conversion from/to unit cell volume (in A^3) to/from the molar volume
(in J/bar)
Args:
vv: value of volume (in A^3 or J/bar)
atojb: if atojb is True (default), conversion is from A^3 to J/bar
if atojb is False, conversion is from J/bar to A^3
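Example:
Illustrative calls (the volume values are placeholders):
>>> volume_conversion(120.)
>>> volume_conversion(2.27, atojb=False)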
"""
if atojb:
vv=vv*avo*1e-25/zu
print("Molar volume: %7.4f J/bar" % vv)
else:
vv=vv*zu*1e25/avo
print("Cell volume: %7.4f A^3" % vv)
def find_temperature_vp(vv,pp, tmin=100., tmax=1000., prt=True):
nt=50
t_list=np.linspace(tmin,tmax,nt)
v_list=list(volume_dir(it,pp) for it in t_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
t_0=t_list[min_diff]
delta=20.
t_min=t_0-delta
t_max=t_0+delta
t_list=np.linspace(t_min,t_max,nt)
v_list=list(volume_dir(it,pp) for it in t_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
t_0f=t_list[min_diff]
if prt:
print("Temperature found:")
print("First guess %5.2f; result: %5.2f K" % (t_0, t_0f))
else:
return t_0f
def find_pressure_vt(vv,tt, pmin, pmax, prt=True):
npp=50
p_list=np.linspace(pmin,pmax,npp)
v_list=list(volume_dir(tt,ip) for ip in p_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
p_0=p_list[min_diff]
delta=0.5
p_min=p_0-delta
p_max=p_0+delta
p_list=np.linspace(p_min,p_max,npp)
v_list=list(volume_dir(tt,ip) for ip in p_list)
diff_l=list((v_list[idx]-vv)**2 for idx in np.arange(len(v_list)))
min_diff=np.argmin(diff_l)
p_0f=p_list[min_diff]
if prt:
print("Pressure found:")
print("First guess %5.2f; result: %5.2f GPa" % (p_0, p_0f))
else:
return p_0f
def bulk_dir(tt,prt=False, out=False, **kwargs):
"""
Optimizes a BM3 EoS from volumes and total pressures at a given
temperature. In turn, phonon pressures are directly computed as volume
derivatives of the Helmholtz function; static pressures are from a V-BM3
fit of E(V) static data.
Negative pressures are excluded from the computation.
Args:
tt: temperature
prt (optional): if True, prints a P(V) list; default: False
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
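Example:
Illustrative call at a placeholder temperature of 300 K, with Kp kept
fixed at 4.5; with out=True, V0, K0 and Kp are returned:
>>> v0, k0, kp = bulk_dir(300., out=True, fix=4.5)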
"""
flag_volume_max.value=False
l_arg=list(kwargs.items())
fixpar=False
flag_serie=False
vol_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if 'serie' == karg_i[0]:
flag_serie=karg_i[1]
if 'volume' == karg_i[0]:
vol_flag=karg_i[1]
[dum,pterm,dum]=bmx_tem(tt)
ini=pterm[0:3]
flag_x=False
if f_fix.flag:
fix=f_fix.value
flag_x=True
p0_f=[ini[0],ini[1]]
if fixpar:
if fix_value < 0.1:
flag_x=False
else:
fix=fix_value
flag_x=True
p0_f=[ini[0],ini[1]]
if flag_spline.flag:
v_list=flag_spline.fit_vol
elif flag_poly.flag:
v_list=flag_poly.fit_vol
else:
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bulk_dir"
print(war1+war2)
return
f_fix_orig=f_fix.flag
volmax=volume_dir(tt,0.)
if flag_volume_max.value:
print("Computation stop. Use set_volume_range to fix the problem")
stop()
volnew=np.append(v_list,volmax)
p_list=np.array([])
for vi in volnew:
pi=pressure_dir(tt,vi)
p_list=np.append(p_list,pi)
v_new=np.array([])
p_new=np.array([])
for iv in zip(volnew,p_list):
if iv[1]>=-0.01:
v_new=np.append(v_new,iv[0])
p_new=np.append(p_new,iv[1])
try:
if flag_x:
pdir, pcov_dir = curve_fit(lambda v_new, v0, k0: \
bm3(v_new, v0, k0, fix), \
v_new, p_new, p0=p0_f, method='dogbox',\
ftol=1e-15, xtol=1e-15)
else:
pdir, pcov_dir = curve_fit(bm3, v_new, p_new, \
method='dogbox', p0=ini[0:3], ftol=1e-15, xtol=1e-15)
perr_t=np.sqrt(np.diag(pcov_dir))
except RuntimeError:
print("EoS optimization did not succeeded for t = %5.2f" % tt)
flag_dir.on()
if flag_serie:
return 0,0
else:
return
if flag_x:
pdir=np.append(pdir,fix)
perr_t=np.append(perr_t,0.00)
if flag_serie and vol_flag:
return pdir[0],pdir[1],pdir[2]
if flag_serie:
return pdir[1],pdir[2]
if out:
return pdir[0], pdir[1], pdir[2]
print("\nBM3 EoS from P(V) fit\n")
print("K0: %8.2f (%4.2f) GPa" % (pdir[1],perr_t[1]))
print("Kp: %8.2f (%4.2f) " % (pdir[2],perr_t[2]))
print("V0: %8.4f (%4.2f) A^3" % (pdir[0],perr_t[0]))
info.temp=tt
info.k0=pdir[1]
info.kp=pdir[2]
info.v0=pdir[0]
vol=np.linspace(min(v_new),max(v_new),16)
press=bm3(vol,*pdir)
plt.figure()
plt.title("BM3 fit at T = %5.1f K\n" % tt)
plt.plot(v_new,p_new,"k*")
plt.plot(vol,press,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("Pressure (GPa)")
plt.show()
if not f_fix_orig:
reset_fix()
if prt:
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in zip(v_new,p_new):
print(" %5.3f %5.2f" % (vp_i[0], vp_i[1]))
def bulk_dir_serie(tini, tfin, npoints, degree=2, update=False, **kwargs):
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_serie=np.linspace(tini, tfin, npoints)
tx_serie=np.array([])
b_serie=np.array([])
for ti in t_serie:
flag_dir.off()
if not fixpar:
bi,kpi=bulk_dir(ti,serie=True)
else:
bi,kpi=bulk_dir(ti, serie=True, fix=fix_value)
if not flag_dir.value:
b_serie=np.append(b_serie,bi)
tx_serie=np.append(tx_serie,ti)
else:
pass
t_serie=tx_serie
plt.figure()
plt.plot(t_serie,b_serie,"k*")
plt.title("Bulk modulus (K0)")
plt.xlabel("T(K)")
plt.ylabel("K (GPa)")
plt.title("Bulk modulus as a function of T")
fit_b=np.polyfit(t_serie,b_serie,degree)
b_fit=np.polyval(fit_b,t_serie)
plt.plot(t_serie,b_fit,"k-")
print("\nResults from the fit (from high to low order)")
np.set_printoptions(formatter={'float': '{: 4.2e}'.format})
print(fit_b)
np.set_printoptions(formatter=None)
plt.show()
if update:
return fit_b
volume_ctrl.shift=0.
def bm4_dir(tt,prt=True):
"""
Optimizes a BM4 EoS from volumes and total pressures at a given
temperature. Negative pressures are excluded from the computation.
Args:
tt: temperature
prt (optional): if True, prints a P(V) list; default: True
"""
flag_volume_max.value=False
start_bm4()
if flag_spline.flag:
v_list=flag_spline.fit_vol
elif flag_poly.flag:
v_list=flag_poly.fit_vol
else:
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bulk_dir"
print(war1+war2)
return
volmax=volume_dir(tt,0.)
if flag_volume_max.value:
print("Computation stop. Use set_volume_range to fix the problem")
stop()
volnew=np.append(v_list,volmax)
p_list=np.array([])
for vi in volnew:
pi=pressure_dir(tt,vi)
p_list=np.append(p_list,pi)
v_new=np.array([])
p_new=np.array([])
for iv in zip(volnew,p_list):
if iv[1]>=-0.01:
v_new=np.append(v_new,iv[0])
p_new=np.append(p_new,iv[1])
ini=np.copy(bm4.en_ini[0:4])
ini[1]=ini[1]*conv*1e21
pdir, pcov_dir = curve_fit(bm4.pressure, v_new, p_new, \
p0=ini, ftol=1e-15, xtol=1e-15)
perr_t=np.sqrt(np.diag(pcov_dir))
print("\nBM4 EoS from P(V) fit\n")
print("K0: %8.2f (%4.2f) GPa" % (pdir[1],perr_t[1]))
print("Kp: %8.2f (%4.2f) " % (pdir[2],perr_t[2]))
print("Kpp: %8.2f (%4.2f) " % (pdir[3], perr_t[3]))
print("V0: %8.4f (%4.2f) A^3" % (pdir[0],perr_t[0]))
vol=np.linspace(min(v_new),max(v_new),16)
press=bm4.pressure(vol,*pdir)
plt.figure()
plt.title("BM4 fit at T = %5.1f K\n" % tt)
plt.plot(v_new,p_new,"k*")
plt.plot(vol,press,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("Pressure (GPa)")
plt.show()
if prt:
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in zip(v_new,p_new):
print(" %5.3f %5.2f" % (vp_i[0], vp_i[1]))
def bulk_modulus_p(tt,pp,noeos=False,prt=False,**kwargs):
"""
Bulk modulus at a temperature and pressure
Args:
tt: temperature
pp: pressure
noeos: to compute pressures, the bm3 EoS is used if
noeos=False (default); otherwise the EoS is
used only for the static part, and vibrational
pressures are obtained from the derivative
of the F function (pressure_dir function)
prt: if True, results are printed
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1. This is relevant
if noeos=False
The values are computed through the direct derivative -V(dP/dV)_T.
Since the computation of the pressure requires the bmx_tem function
(if noeos=False), Kp can be kept fixed by setting fix=Kp > 0.1
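Example:
Illustrative call (placeholder T and P values); with prt=False
(default) the bulk modulus and the volume are returned:
>>> k, v = bulk_modulus_p(300., 1., noeos=True)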
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if not noeos:
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)[0]
else:
vol=new_volume(tt,pp)[0]
else:
vol=volume_dir(tt,pp)
if not vd.flag:
delta=pr.delta_v
else:
delta=vd.delta
numv=pr.nump_v
degree=pr.degree_v
v_range=np.linspace(vol-delta/2.,vol+delta/2.,numv)
press_range=[]
for iv in v_range:
if not noeos:
if fixpar:
p_i=pressure(tt,iv,fix=fix_value)
else:
p_i=pressure(tt,iv)
else:
p_i=pressure_dir(tt,iv)
press_range=np.append(press_range,p_i)
press_fit=np.polyfit(v_range,press_range,degree)
b_poly=np.polyder(press_fit,1)
b_val=np.polyval(b_poly,vol)
b_val=(-1*b_val*vol)
if prt:
eos=str(noeos)
print("Bulk Modulus at T = %5.1f K and P = %3.1f GPa, noeos = %s: %6.3f GPa, V = %6.3f " %\
(tt,pp,eos,b_val, vol))
else:
b_val=round(b_val,3)
return b_val, vol
def bulk_modulus_p_serie(tini, tfin, nt, pres, noeos=False, fit=False, type='poly', \
deg=2, smooth=5, out=False, **kwargs):
"""
Computes the bulk modulus from the definition K=-V(dP/dV)_T in a range
of temperature values
Args:
tini: lower temperature in the range
tfin: higher temperature in the range
nt: number of points in the [tini, tfin] range
pres: pressure (GPa)
noeos: see note below
fit: if True, a fit of the computed K(T) values is performed
type: type of the fit ('poly', or 'spline')
deg: degree of the fit
smooth: smooth parameter for the fit; relevant if type='spline'
out: if True, the parameters of the K(T) and V(T) fits are returned
Keyword Args:
fix: if fix is provided, Kp is kept fixed at the fix value
Relevant if noeos=False
Note:
if noeos=False, the pressure at any given volume is calculated
from the equation of state. If noeos=True, the pressure is computed
as the first derivative of the Helmholtz function (at constant
temperature)
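Example:
Illustrative call over a placeholder T range at zero pressure, with
a polynomial fit of the K(T) values:
>>> bulk_modulus_p_serie(300., 800., 12, 0., noeos=True, fit=True, deg=2)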
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_list=np.linspace(tini, tfin, nt)
b_l=np.array([])
t_l=np.array([])
v_l=np.array([])
if fixpar:
for it in t_list:
ib, v_val=bulk_modulus_p(it,pres,noeos=noeos,fix=fix_value)
if vol_opt.flag:
b_l=np.append(b_l,ib)
t_l=np.append(t_l,it)
v_l=np.append(v_l,v_val)
else:
for it in t_list:
ib,v_val=bulk_modulus_p(it,pres,noeos=noeos)
if vol_opt.flag:
t_l=np.append(t_l,it)
b_l=np.append(b_l,ib)
v_l=np.append(v_l,v_val)
if fit:
t_fit=np.linspace(tini,tfin,50)
if type=='poly':
fit_par=np.polyfit(t_l,b_l,deg)
b_fit=np.polyval(fit_par,t_fit)
fit_par_v=np.polyfit(t_l,v_l,deg)
v_fit=np.polyval(fit_par_v,t_fit)
elif type=='spline':
fit_par=UnivariateSpline(t_l,b_l,k=deg,s=smooth)
b_fit=fit_par(t_fit)
fit_par_v=UnivariateSpline(t_l,v_l,k=deg,s=0.1)
v_fit=fit_par_v(t_fit)
method='poly'
if type=='spline':
method='spline'
lbl=method+' fit'
plt.figure()
plt.plot(t_l,b_l,"k*",label='Actual values')
if fit:
plt.plot(t_fit, b_fit,"k-",label=lbl)
plt.xlabel("Temperature (K)")
plt.ylabel("K (GPa)")
tlt="Bulk modulus at pressure "+str(pres)
plt.title(tlt)
plt.legend(frameon=False)
plt.show()
reset_fix()
if out & fit:
return fit_par, fit_par_v
def bulk_modulus_adiabat(tt,pp,noeos=False, prt=True,**kwargs):
"""
Adiabatic bulk modulus at a temperature and pressure
Args:
tt: temperature
pp: pressure
fix (optional): optimizes Kp if fix=0., or keeps Kp
fixed if fix=Kp > 0.1
The values are computed through the direct derivative -V(dP/dV)_T.
Since the computation of the pressure requires the bmx_tem function,
Kp can be kept fixed by setting fix=Kp > 0.1
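Example:
Illustrative call (placeholder T and P values), with Kp kept fixed at 4.5:
>>> bulk_modulus_adiabat(300., 0., fix=4.5)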
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)[0]
alpha,kt_dum,pr=thermal_exp_v(tt,vol,False,fix=fix_value)
kt,_=bulk_modulus_p(tt,pp,noeos=noeos,fix=fix_value)
ent,cv=entropy_v(tt,vol,False,False,fix=fix_value)
else:
vol=new_volume(tt,pp)[0]
alpha,kt_dum,pr=thermal_exp_v(tt,vol,False)
kt,_=bulk_modulus_p(tt,pp,noeos=noeos)
ent,cv=entropy_v(tt,vol,False,False)
volm=(vol*avo*1e-30)/zu
ks=kt*(1+volm*(tt*1e9*kt*alpha**2)/cv)
if prt:
print("\nAdiabatic bulk modulus Ks: %5.2f GPa" % ks)
print("Isoth. Kt: %5.2f GPa, alpha: %5.2e K^-1, sp. heat Cv: %6.2f J/mol K"\
% (kt, alpha, cv))
print("Cell volume: %6.2f A^3, molar volume %6.2f cm^3" % (vol, 1e6*volm))
else:
return ks
def static(plot=False, vmnx=[0., 0.]):
"""
Static EoS
Args:
plot: plot of the E(V) curve
vmnx: array of two reals [vmin and vmax]; vmin is the
minimum volume and vmax is the maximum volume.
If vmin and vmax are both 0., the whole V range
is used (as specified in the static energies file).
Default=[0., 0.]
Note:
The volume range can also be modified by using the methods
of the static_volume class
Examples:
>>> static_volume.set(100., 120.)
>>> static_volume.on()
>>> static(plot=True)
Computes the static EoS in the [100., 120.] volume range. The same
is obtained with
>>> static(plot=True, vmnx=[100., 120.])
However, with the first method the defined volume range is recorded for
future computations; by using the second method, the volume range is reset
to the original one, once the fit is performed.
"""
global pcov
if flag_err:
return None
vol_flag=False
if static_range.flag:
vol_min=static_range.vmin
vol_max=static_range.vmax
vol_flag=True
else:
if (vmnx[0] > 0.1) or (vmnx[1] > 0.1):
vol_flag=True
vol_min=vmnx[0]
vol_max=vmnx[1]
if vol_flag:
vol_select=(volume >= vol_min) & (volume <= vol_max)
vol_selected=volume[vol_select]
energy_selected=energy[vol_select]
if not vol_flag:
popt, pcov = curve_fit(v_bm3, volume, energy, p0=ini,ftol=1e-15,xtol=1e-15)
else:
popt, pcov = curve_fit(v_bm3, vol_selected, energy_selected, p0=ini,ftol=1e-15,xtol=1e-15)
k_gpa=popt[1]*conv/1e-21
kp=popt[2]
v0=popt[0]
perr=np.sqrt(np.diag(pcov))
ke=perr[1]*conv/1e-21
print("\nStatic BM3 EoS")
print("\nBulk Modulus: %5.2f (%4.2f) GPa" % (k_gpa, ke))
print("Kp: %5.2f (%4.2f)" % (kp, perr[2]))
print("V0: %5.4f (%4.2f) A^3" % (v0, perr[0]))
print("E0: %5.8e (%4.2e) hartree" % (popt[3], perr[3]))
if vol_flag:
print("\nStatic EoS computed in a restricted volume range:")
print(vol_selected)
print("\n")
info.k0_static=k_gpa
info.kp_static=kp
info.v0_static=v0
info.popt=popt
info.popt_orig=popt
vd.set_delta(v0)
vol_min=np.min(volume)
vol_max=np.max(volume)
nvol=50
vol_range=np.linspace(vol_min,vol_max,nvol)
if plot:
plt.figure(0)
plt.title("E(V) static BM3 curve")
plt.plot(volume,energy,"*")
plt.plot(vol_range, v_bm3(vol_range, *popt), 'b-')
plt.ylabel("Static energy (a.u.)")
plt.xlabel("V (A^3)")
plt.show()
def p_static(nvol=50, v_add=[], e_add=[]):
"""
Computes a static BM3-EoS from a P/V set of data. Data (cell volumes in A^3 and
pressures in GPa) must be contained in a file whose name must be specified
in the input file (together with the energy, in hartree, at the equilibrium
static volume).
Args:
nvol: number of volume points for the graphical output (default 50)
v_add / e_add: lists of volume/energy data to be plotted together
with the E/V curve from the V-EoS fit. Such added
points are not used in the fit (no points added as default)
Note:
This function provides static data for the calculation of the static
contribution to the Helmholtz free energy. It is an alternative to
the fit of the static E/V data performed by the 'static' function.
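Example:
Illustrative call (it assumes that a P/V data file and the static
energy at the equilibrium volume have been specified in the input file):
>>> p_static(nvol=80)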
"""
add_flag=False
if v_add != []:
add_flag=True
p_data=np.loadtxt(data_p_file)
pres_gpa=p_data[:,1]
vs=p_data[:,0]
pres=pres_gpa*1e-21/conv
pstat, cstat = curve_fit(bm3, vs, pres, p0=ini[0:3],ftol=1e-15,xtol=1e-15)
info.popt=pstat
info.popt=np.append(info.popt,static_e0)
k_gpa=info.popt[1]*conv/1e-21
kp=info.popt[2]
v0=info.popt[0]
info.k0_static=k_gpa
info.kp_static=kp
info.v0_static=v0
print("\nStatic BM3 EoS")
print("\nBulk Modulus: %5.2f GPa" % k_gpa)
print("Kp: %5.2f " % kp )
print("V0: %5.4f A^3" % v0)
print("E0: %5.8e hartree" % info.popt[3])
vol_min=np.min(vs)
vol_max=np.max(vs)
ps=info.popt[0:3]
vol_range=np.linspace(vol_min,vol_max,nvol)
p_GPa=bm3(vol_range, *ps)*conv/1e-21
plt.figure(0)
plt.title("P(V) static BM3 curve")
plt.plot(vs,pres_gpa,"*")
plt.plot(vol_range, p_GPa, 'b-')
plt.ylabel("Pressure (GPa)")
plt.xlabel("V (A^3)")
plt.show()
p_stat.flag=True
p_stat.vmin=np.min(vs)
p_stat.vmax=np.max(vs)
p_stat.pmin=np.min(pres_gpa)
p_stat.pmax=np.max(pres_gpa)
p_stat.npoints=vs.size
p_stat.k0=k_gpa
p_stat.kp=kp
p_stat.v0=v0
p_stat.e0=static_e0
energy_static=v_bm3(vol_range, *info.popt_orig)
energy_pstatic=v_bm3(vol_range, *info.popt)
delta=energy_pstatic-energy_static
select=(volume >= vol_min) & (volume <= vol_max)
vv=volume[select]
ee=energy[select]
plt.figure()
plt.plot(vol_range, energy_static, "k-", label="STATIC case")
plt.plot(vol_range, energy_pstatic, "k--", label="PSTATIC case")
plt.plot(vv,ee,"k*", label="Original E(V) data")
if add_flag:
plt.plot(v_add, e_add, "r*", label="Not V-BM3 fitted data")
plt.legend(frameon=False)
plt.xlabel("Volume (A^3)")
plt.ylabel("E (hartree)")
plt.title("E(V) curves")
plt.show()
plt.figure()
plt.plot(vol_range,delta,"k-")
plt.xlabel("Volume (A^3)")
plt.ylabel("E (hartree)")
plt.title("Pstatic and static energy difference")
plt.show()
delta=abs(delta)
mean=delta.mean()
mean_j=mean*conv*avo/zu
std=delta.std()
imx=np.argmax(delta)
mx=delta[imx]
vx=vol_range[imx]
print("Mean discrepancy: %6.3e hartree (%5.1f J/mole)" % (mean, mean_j))
print("Standard deviation: %4.1e hartree" % std)
print("Maximum discrepancy %6.3e hartree for a volume of %6.2f A^3" % (mx, vx))
def static_pressure_bm3(vv):
"""
Outputs the static pressure (in GPa) at the volume (vv)
Args:
vv: volume
"""
static(plot=False)
k0=info.popt[1]
kp=info.popt[2]
v0=info.popt[0]
p_static_bm3=bm3(vv,v0, k0,kp)
ps=p_static_bm3*conv/1e-21
print("Static pressure at the volume: %4.2f" % ps)
def start_bm4():
bm4.on()
bm4.estimates(volume,energy)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
bm4p, bm4c = curve_fit(bm4.energy, volume, energy, \
method='dogbox', p0=bm4.en_ini,ftol=1e-15,xtol=1e-15,gtol=1e-15)
bm4.store(bm4p)
bm4.upgrade()
bm4.upload(bm4p)
bm4_k=bm4p[1]*conv/1e-21
kp=bm4p[2]
kpp=bm4p[3]
v0=bm4p[0]
print("\nStatic BM4-EoS")
print("\nBulk Modulus: %5.2f GPa" % bm4_k)
print("Kp: %5.2f " % kp)
print("Kpp: %5.2f " % kpp)
print("V0: %8.4f A^3" % v0)
print("\n")
plt.figure()
# bm4e=np.array([])
vbm4=np.linspace(min(volume),max(volume),50)
bm4e=bm4.energy(vbm4,*bm4.bm4_static_eos)
plt.plot(vbm4,bm4e,"k-")
plt.plot(volume,energy,"k*")
plt.title("Static Energy: BM4 fit")
plt.xlabel("Static energy (a.u.)")
plt.ylabel("V (A^3)")
plt.show()
def free(temperature):
"""
Computes the Helmholtz free energy (hartree) at a given temperature
Args:
temperature: temperature (in K) at which the computation is done
Note:
1. ei is the static energy
2. enz_i is the zero point energy
3. fth_i is thermal contribution to the Helmholtz free energy
4. tot_i is the total Helmholtz free energy
Note:
This is a direct calculation that avoids the fit of a polynomial
to the frequencies. No FITVOL in input.txt
Note:
If kieffer.flag is True, the contribution from acoustic branches
is taken into account, by following the Kieffer model.
"""
energy_tot=[]
for ivol in int_set:
vol_i=data_vol_freq_orig[ivol]
if bm4.flag:
ei=bm4.energy(vol_i,*bm4.bm4_static_eos)
else:
ei=v_bm3(vol_i, *info.popt)
enz_i=0.
fth_i=0.
eianh=0.
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
freq_i=lo.data_freq[ifreq,ivol+1]
if freq_i >= 0.:
fth_i=fth_i+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/temperature))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz_i=enz_i+deg[ifreq]*freq_i*ez_fact
evib_i=enz_i+fth_i*kb*temperature/conv+eianh
tot_i=ei+evib_i
energy_tot=np.append(energy_tot,tot_i)
if kieffer.flag:
free_k=kieffer.get_value(temperature)
free_k=free_k/(avo*conv)
energy_tot=energy_tot+free_k
return energy_tot
def free_fit(temperature):
"""
Computes the Helmholtz free energy (in hartree) at a given temperature
Args:
temperature: temperature (in K)
Note:
1. ei is the static energy
2. enz_i is the zero point energy
3. fth_i is thermal contribution to the Helmholtz free energy
4. tot_i is the total Helmholtz free energy
Note:
This computation makes use of polynomials fitted
to the frequencies of each vibrational mode, as
functions of volume. It is activated by the keyword
FITVOL in the input.txt file
Note:
Possible contributions from anharmonicity (keyword ANH in the input
file) or from a modified Kieffer model (keyword KIEFFER in the input file)
are included. NO contribution from DISP modes is considered (phonon dispersion
from a supercell calculation).
Note: the volumes to which the free energy refers are defined in the fit_vol
list
"""
energy_tot=[]
eianh=0.
if flag_spline.flag:
fit_vol=flag_spline.fit_vol
elif flag_poly.flag:
fit_vol=flag_poly.fit_vol
for ivol in fit_vol:
if bm4.flag:
ei=bm4.energy(ivol,*bm4.bm4_static_eos)
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
else:
ei=v_bm3(ivol,*info.popt)
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,ivol,temperature)*anharm.wgt[im]
enz_i=0.
fth_i=0.
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
if not flag_spline.flag:
freq_i=freq_v_fun(ifreq,ivol)
else:
freq_i=freq_spline_v(ifreq,ivol)
if freq_i >= 0.:
fth_i=fth_i+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/temperature))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz_i=enz_i+deg[ifreq]*freq_i*ez_fact
evib_i=enz_i+fth_i*kb*temperature/conv+eianh
tot_i=ei+evib_i
energy_tot=np.append(energy_tot,tot_i)
if kieffer.flag:
free_k=kieffer.get_value(temperature)
free_k=free_k/(avo*conv)
energy_tot=energy_tot+free_k
return energy_tot
def free_fit_vt(tt,vv):
"""
Computes the Helmholtz free energy at a given temperature and volume.
Free energy is computed by addition of several contributions:
(1) static contribution from a volume-integrated BM3 EoS
(2) vibrational contribution from optical vibrational modes
(3) vibrational contribution from phonon dispersion (supercell calculations)
(4) vibrational contribution from acoustic modes (modified Kieffer model)
(5) vibrational contribution from anharmonic mode(s)
Contributions (1) and (2) are always included; contributions (3) and (4)
are mutually exclusive and are respectively activated by the keywords
DISP and KIEFFER in the input file; anharmonic contributions (5) are activated
by the keyword ANH in the input file.
Args:
tt: temperature (K)
vv: volume (A^3)
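Example:
Illustrative call (placeholder T and V values; FITVOL fits of the
frequencies are assumed):
>>> f = free_fit_vt(300., 120.)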
"""
e_static=v_bm3(vv,*info.popt)
enz=0
fth=0
eianh=0.
if anharm.flag:
eianh=0.
for im in np.arange(anharm.nmode):
eianh=eianh+helm_anharm_func(im,vv,tt)*anharm.wgt[im]
for ifreq in int_mode:
if ifreq in exclude.ex_mode:
pass
else:
if not flag_spline.flag:
freq_i=freq_v_fun(ifreq,vv)
else:
freq_i=freq_spline_v(ifreq,vv)
if freq_i >= 0.:
fth=fth+deg[ifreq]*np.log(1-np.e**(freq_i*e_fact/tt))
else:
print("Negative frequency found: mode n. %d" % ifreq)
stop()
enz=enz+deg[ifreq]*freq_i*ez_fact
tot_no_static=enz+fth*kb*tt/conv+eianh
tot=e_static+tot_no_static
if kieffer.flag:
free_k=kieffer.get_value(tt)
free_k=free_k/(avo*conv)
tot=tot+free_k
if disp.flag and (disp.eos_flag or disp.thermo_vt_flag):
if not disp.fit_vt_flag:
disp.free_fit_vt()
print("\n**** INFORMATION ****")
print("The V,T-fit of the phonon dispersion surface was not prepared")
print("it has been perfomed with default values of the relevant parameters")
print("Use the disp.free_fit_vt function to redo with new parameters\n")
disp_l=disp.free_vt(tt,vv)
free_f=(tot_no_static+disp_l)/(disp.molt+1)
tot=e_static+free_f
return tot
def eos_temp_range(vmin_list, vmax_list, npp, temp):
"""
EoS computed for different volumes ranges
Args:
vmin_list: list of minimum volumes
vmax_list: list of maximum volumes
npp: number of points in each V-range
temp: temperature
Note:
vmin_list and vmax_list must be lists of the same length
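Example:
Illustrative call with two placeholder volume ranges at 300 K:
>>> eos_temp_range([100., 102.], [118., 120.], 16, 300.)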
"""
final=np.array([])
size=len(vmin_list)
for vmin, vmax in zip(vmin_list,vmax_list):
v_list=np.linspace(vmin,vmax,npp)
free_list=np.array([])
for iv in v_list:
ifree=free_fit_vt(temp, iv)
free_list=np.append(free_list,ifree)
pterm, pcov_term = curve_fit(v_bm3, v_list, free_list, \
p0=ini, ftol=1e-15, xtol=1e-15)
k_gpa=pterm[1]*conv/1e-21
k_gpa_err=pcov_term[1]*conv/1e-21
pmax=pressure(temp,vmin)
pmin=pressure(temp,vmax)
final=np.append(final, [vmin, vmax, round(pmax,1), round(pmin,1), round(pterm[0],4), round(k_gpa,2), \
round(pterm[2],2)])
final=final.reshape(size,7)
final=final.T
pd.set_option('colheader_justify', 'center')
df=pd.DataFrame(final, index=['Vmin','Vmax','Pmax','Pmin','V0','K0','Kp'])
df=df.T
print("\nBM3-EoS computed for different volume ranges")
print("Temperature: %6.1f K" % temp)
print("")
print(df.to_string(index=False))
def g_vt_dir(tt,pp,**kwargs):
flag_volume_max.value=False
l_arg=list(kwargs.items())
v0_flag=False
g0_flag=False
for karg_i in l_arg:
if 'g0' == karg_i[0]:
g0_flag=True
gexp=karg_i[1]
elif 'v0' == karg_i[0]:
v0_flag=True
v0_value=karg_i[1]
vol0=volume_dir(298.15,0.0001)
fact=1.
if v0_flag:
fact=(1e25*v0_value*zu/avo)/vol0
gref=free_fit_vt(298.15,vol0)*conv*avo/zu + 0.0001*vol0*fact*avo*1e-21/zu
if g0_flag:
gref=gref-gexp
vv=volume_dir(tt,pp)
if flag_volume_max.value:
flag_volume_max.inc()
if flag_volume_max.jwar < 2:
print("Warning g_vt_dir: volume exceeds maximum set in volume_range")
free_f=free_fit_vt(tt,vv)
gtv=(avo/zu)*(free_f*conv) + (avo/zu)*pp*vv*fact*1e-21
return gtv-gref
def entropy_v(tt,vv, plot=False, prt=False, **kwargs):
"""
Entropy and specific heat at constant volume
Args:
tt: temperature
vv: volume
plot (optional): (default False) plots free energy vs T for checking
possible numerical instabilities
prt (optional): (default False) prints formatted output
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Returns:
if prt=False (default) outputs the entropy and the specific heat
at constant volume (unit: J/mol K). if prt=True, a formatted
output is printed and the function provides no output
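Example:
Illustrative call (placeholder T and V values); the entropy and Cv
are returned, as prt is False by default:
>>> s, cv = entropy_v(300., 120., fix=4.)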
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
if delta_ctrl.adaptive:
delta=delta_ctrl.get_delta(tt)
else:
delta=delta_ctrl.get_delta()
maxv=max(data_vol_freq)
free_f=[]
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
t_range=np.linspace(min_t,max_t,nump)
for i_t in t_range:
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(i_t,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(i_t)
if (pterm[0]>maxv):
if flag_warning.value:
print("\nWarning: volume out of range; reduce temperature")
flag_warning.off()
flag_warning.inc()
if bm4.flag:
f1=bm4.energy(vv,*pterm)
else:
f1=v_bm3(vv,*pterm)
free_f=np.append(free_f,f1)
if disp.flag:
disp_l=[]
disp.free_fit(disp.temp,vv,disp=False)
for i_t in t_range:
if not disp.thermo_vt_flag:
idf=disp.free_func(i_t)
else:
idf=disp.free_vt(i_t,vv)
disp_l=np.append(disp_l,idf)
free_f=(free_f+disp_l)/(disp.molt+1)
if plot:
plt.figure(4)
plt.plot(t_range,free_f,"*")
plt.title("F free energy (a.u.)")
plt.show()
fit=np.polyfit(t_range,free_f,degree)
der1=np.polyder(fit,1)
der2=np.polyder(fit,2)
entropy=-1*np.polyval(der1,tt)*conv*avo/zu
cv=-1*np.polyval(der2,tt)*tt*conv*avo/zu
if prt:
print("\nEntropy: %7.2f J/mol K" % entropy)
print("Specific heat (at constant volume): %7.2f J/mol K" % cv)
return None
else:
return entropy, cv
def entropy_dir_v(tt, vv, prt=False):
"""
Computation of the entropy at a given volume by means of the free_fit_vt
function. The method is EoS free and automatically includes contributions
from optic modes, off-center modes and anharmonic modes.
Args:
tt: temperature (K)
vv: cell volume (A^3)
prt: detailed output
Note:
In case phonon dispersion is included, the disp.thermo_vt mode
must be activated. The function checks this and, if needed,
activates such a mode.
"""
if disp.flag:
if not disp.thermo_vt_flag:
print("Warning: disp.thermo_vt activation")
disp.thermo_vt_on()
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
if delta_ctrl.adaptive:
delta=delta_ctrl.get_delta(tt)
else:
delta=delta_ctrl.get_delta()
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
free_f=np.array([])
t_range=np.linspace(min_t,max_t,nump)
for it in t_range:
ifree=free_fit_vt(it,vv)
free_f=np.append(free_f, ifree)
free_fit=np.polyfit(t_range, free_f, degree)
free_der1=np.polyder(free_fit,1)
free_der2=np.polyder(free_fit,2)
entropy=-1*np.polyval(free_der1,tt)*conv*avo/zu
cv=-1*np.polyval(free_der2,tt)*tt*conv*avo/zu
if prt:
print("\nEntropy: %7.2f J/mol K" % entropy)
print("Specific heat (at constant volume): %7.2f J/mol K" % cv)
return None
else:
return entropy, cv
def entropy_p(tt,pp,plot=False,prt=True, dir=False, **kwargs):
"""
Entropy and specific heat at constant volume at selected temperature
and pressure
Args:
tt: temperature
pp: pressure
plot (optional): (default False) plots free energy vs T for checking
possible numerical instabilities
prt (optional): (default True) prints formatted output
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Returns:
if prt=False, the entropy and the specific heat at constant
volume (J/mol K) are returned; if prt=True (default),
a formatted output is printed and the function returns None
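Example:
Illustrative call (placeholder T and P values); with prt=False the
entropy and specific heat are returned instead of being printed:
>>> s, cv = entropy_p(300., 1., prt=False)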
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
if dir:
vol=volume_dir(tt,pp)
ent_v=entropy_dir_v(tt, vol, prt)
else:
ent_v=entropy_v(tt,vol,plot,prt,fix=fix_value)
else:
vol=new_volume(tt,pp)
if dir:
vol=volume_dir(tt,pp)
ent_v=entropy_dir_v(tt, vol, prt)
else:
ent_v=entropy_v(tt,vol,plot,prt)
if prt:
print("Pressure: %5.2f GPa; Volume %8.4f A^3" % (pp, vol))
return None
else:
return ent_v
def thermal_exp_v(tt,vv,plot=False,**kwargs):
"""
Thermal expansion at a given temperature and volume
Args:
tt: temperature
vv: volume
plot (optional): (default False) plots pressure vs T for checking
possible numerical instabilities
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Returns:
thermal expansion (K^-1), bulk modulus (GPa) and pressure (GPa)
at given temperature=tt and volume=vv
Notes:
The value is obtained by calculating (dP/dT)_V divided by K
where K=K0+K'*P; P is obtained from the BM3 EoS whose parameters
(at temperatures in the range "t_range") are refined by fitting
the free energy F(V,T) curves. The pressures calculated
(at constant vv) for the different T in t_range are then fitted by a
polynomial of suitable degree ("degree" variable) which is then
differentiated analytically at the temperature tt, to get (dP/dT)_V.
If "fix" > 0.1, the BM3 fitting is done by keeping kp fixed at the
value "fix".
The function outputs the thermal expansion (in K^-1), the bulk
modulus [at the pressure P(vv,tt)] and the pressure (in GPa).
If the boolean "plot" is True (default False), P is plotted as a
function of T over the t_range interval.
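Example:
Illustrative call (placeholder T and V values), with Kp kept fixed at 4:
>>> alpha, k, p = thermal_exp_v(300., 120., fix=4.)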
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
delta=delta_ctrl.get_delta()
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
maxv=max(data_vol_freq)
pressure=[]
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
t_range=np.linspace(min_t,max_t,nump)
for ict in t_range:
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(ict,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(ict)
if bm4.flag:
f1=bm4.pressure(vv,pterm[0],pterm[1],pterm[2],pterm[3])*\
conv/1e-21
else:
f1=bm3(vv,pterm[0],pterm[1],pterm[2])*conv/1e-21
pressure=np.append(pressure,f1)
if (pterm[0]>maxv):
if flag_warning.value:
print("\nWarning: volume out of range; reduce temperature")
flag_warning.off()
flag_warning.inc()
if plot:
plt.figure(5)
plt.plot(t_range,pressure,"*")
plt.title("Pressure (GPa)")
plt.show()
fit=np.polyfit(t_range,pressure,degree)
der1=np.polyder(fit,1)
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(tt,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(tt)
if bm4.flag:
pressure=bm4.pressure(vv,pterm[0],pterm[1],pterm[2],pterm[3])*\
conv/1e-21
else:
pressure=bm3(vv,pterm[0],pterm[1],pterm[2])*conv/1e-21
k=(pterm[1]*conv/1e-21)+pterm[2]*pressure
return np.polyval(der1,tt)/k,k,pressure
def thermal_exp_p(tt,pp,plot=False,exit=False,**kwargs):
"""
Thermal expansion at a given temperature and pressure, based on
the computation of the K*alpha product.
Args:
tt: temperature
pp: pressure
plot (optional): plots pressure vs T values (see help to
the thermal_exp_v function)
exit: if True, the alpha value is returned without formatting (default False)
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
Note:
see help for the thermal_exp_v function
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot,fix=fix_value)
else:
vol=new_volume(tt,pp)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot)
if exit:
return alpha
else:
print("\nThermal expansion: %6.2e K^-1" % alpha)
print("Bulk modulus: %6.2f GPa" % k)
print("Pressure: %6.2f GPa" % pressure)
print("Volume: %8.4f A^3\n" % vol)
def alpha_serie(tini,tfin,npoint,pp,plot=False,prt=True, fit=True,HTlim=0.,\
degree=1, save='', g_deg=1, tex=False, title=True, **kwargs):
"""
Thermal expansion in a temperature range, at a given pressure (pp),
and (optional) fit with a polynomial whose powers are specified
in the input.txt file
Note:
The computation is performed by using the thermal_exp_v function
that is based on the evaluation of the K*alpha product (for details,
see the documentation of the thermal_exp_v function).
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if HTlim > 0.:
alpha_limit=grun_therm_serie(tini,tfin,npoint=12,HTlim=HTlim,degree=degree,\
g_deg=g_deg, ex=True)
t_range=np.linspace(tini,tfin,npoint)
alpha_serie=[]
for ict in t_range:
if fixpar:
vol=new_volume(ict,pp,fix=fix_value)
[alpha_i,k,pressure]=thermal_exp_v(ict,vol,plot,fix=fix_value)
else:
vol=new_volume(ict,pp)
[alpha_i,k,pressure]=thermal_exp_v(ict,vol,plot)
alpha_serie=np.append(alpha_serie,alpha_i)
if HTlim > 0:
t_range=np.append(t_range,HTlim)
alpha_serie=np.append(alpha_serie,alpha_limit)
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
fig=plt.figure(10)
ax=fig.add_subplot(111)
ax.plot(t_range,alpha_serie,"k*")
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
if latex.flag:
ax.set_xlabel("T (K)", fontsize=fontsize)
ax.set_ylabel(r'$\alpha$ (K$^{-1}$)', fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
ax.set_xlabel("T (K)")
ax.set_ylabel("Alpha (K^-1)")
if title:
plt.title("Thermal expansion")
if prt:
serie=(t_range, alpha_serie)
df=pd.DataFrame(serie,index=['Temp.','alpha'])
df=df.T
print("\n")
df['alpha']=df['alpha'].map('{:,.3e}'.format)
df['Temp.']=df['Temp.'].map('{:,.2f}'.format)
print(df.to_string(index=False))
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_fun,t_range,alpha_serie,p0=coef_ini)
tvfin=tfin
if HTlim > 0:
tvfin=HTlim
t_value=np.linspace(tini,tvfin,pr.ntemp_plot_cp)
alpha_value=[]
for ict in t_value:
alpha_i=alpha_fun(ict,*alpha_fit)
alpha_value=np.append(alpha_value,alpha_i)
plt.plot(t_value,alpha_value,"k-")
if save !='':
plt.savefig(fname=path+'/'+save,dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
if prt:
return None
elif fit:
return alpha_fit
else:
return None
def alpha_fun(tt,*coef):
"""
Outputs the thermal expansion at a given temperature, from
the fit obtained with the alpha_serie function
"""
alpha=0.
jc=0
while jc<lpow_a:
alpha=alpha+coef[jc]*(tt**power_a[jc])
jc=jc+1
return alpha
def dalpha_dt(tt,pp,**kwargs):
"""
Outputs the derivative of alpha with respect to T
at constant pressure. It is used by dCp_dP
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
delta=pr.delta_alpha
nump=pr.nump_alpha
degree=pr.degree_alpha
alpha=[]
min_t=tt-delta/2.
max_t=tt+delta/2.
if min_t < 0.1:
min_t=0.1
t_range=np.linspace(min_t,max_t,nump)
for ict in t_range:
if fixpar:
alpha_i=thermal_exp_p(ict,pp,fix=fix_value,exit=True)
else:
alpha_i=thermal_exp_p(ict,pp,exit=True)
alpha=np.append(alpha,alpha_i)
fit=np.polyfit(t_range,alpha,degree)
dfit=np.polyder(fit,1)
return np.polyval(dfit,tt)
def alpha_dir(tt,pp):
"""
Calculation of the thermal expansion at a given temperature and
pressure. The computation is done by following the definition of
alpha, as alpha=1/V (dV/dT)_P.
Args:
tt: temperature (K)
pp: pressure (GPa)
Note:
The calculation of the volume at a given temperature is done
by the volume_dir function
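Example:
Illustrative call at a placeholder temperature of 300 K and zero pressure:
>>> alpha_dir(300., 0.)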
"""
dt=delta_ctrl.get_delta()
nt=delta_ctrl.get_nump()
dt2=dt/2.
deg=delta_ctrl.get_degree()
alpha_opt.on()
v0=volume_dir(tt,pp,alpha_flag_1=True, alpha_flag_2=False)
if not vol_opt.flag:
alpha_opt.off()
t_list=np.linspace(tt-dt2, tt+dt2, nt)
vl=np.array([])
tl=np.array([])
for it in t_list:
iv=volume_dir(it,pp,alpha_flag_1=True, alpha_flag_2=True)
if vol_opt.flag:
vl=np.append(vl,iv)
tl=np.append(tl,it)
fit=np.polyfit(tl,vl,deg)
fit_d=np.polyder(fit,1)
alpha=np.polyval(fit_d,tt)
alpha=alpha/v0
return alpha
def alpha_dir_v(tmin, tmax, nt=12, type='spline', deg=4, smooth=0.001, comp=False, fit=False, trim=0., phase=''):
"""
Computes thermal expansion from the derivative of a V(T) function
calculated on a generally large T range.
Args:
tmin: minimum temperature
tmax: maximum temperature
nt: number of T points in the range (default 12)
type: if 'spline' (default), a spline fit of the V(T) values is performed;
otherwise a polynomial fit is chosen.
deg: degree of the spline (or polynomial) fit of the V(T) values (default 4)
smooth: smoothness parameter of the spline fit (default 0.001);
relevant if type='spline'
comp: if True, the thermal expansions from other methods
are also computed and plotted (default False)
fit: if True, a power series fit is performed and its parameters are returned
trim: if trim > 0. and if fit=True, the power series fit is done over the
[tmin, tmax-trim] T-range, to avoid possible fitting problems at the end of the
high temperature interval
phase: if not empty and if fit=True, uploads the coefficients of the
power series fit for the selected phase (default '')
Note:
The spline fit is performed on the Log(V) values; the derivative
of the spline fit coincides with the definition of thermal expansion
Note:
the volume at each temperature is computed by using the volume_dir function
Note:
Without selecting phase, to upload the parameters from the power series fit,
execute the alpha_dir_v function by saving the output in a variable;
then use the load_alpha method of the mineral class to upload the variable.
"""
print("\nSummary of the input parameters\n")
print("T range: %5.1f, %5.1f K, Num. of points: %4i" % (tmin, tmax, nt))
if type=='spline':
print("Type of Log(V) fit: %s, degree: %2i, smooth: %5.4f" % (type, deg, smooth))
else:
print("Type of Log(V) fit: %s, degree: %2i" % (type, deg))
print("Compare with other methods to compute alpha: %s" % comp)
print("Fit alpha values to a power serie: %s" % fit)
if fit:
print("Trim applied to T and alpha values for the power serie fit: %5.1f" % trim)
if phase != '':
print("Power serie coefficient uploaded for the phase %s" % phase)
print("")
t_list=np.linspace(tmin, tmax, nt)
v_list=np.array([])
# internal flag: complete calculation if all the three flags
# are set to True.
# flag[0]: calculation from volume_dir
# flag[1]: calculation from EoS
# flag[2]: calculation from volume_from_F
flag=[True, True, True]
for it in t_list:
iv=volume_dir(it,0)
v_list=np.append(v_list,iv)
if comp:
al_list=np.array([])
therm_list=np.array([])
if flag[0]:
for it in t_list:
ial=alpha_dir(it,0)
al_list=np.append(al_list, ial)
if flag[1]:
if f_fix.flag:
reset_fix()
for it in t_list:
ith=thermal_exp_p(it,0., exit=True)[0]
therm_list=np.append(therm_list, ith)
if flag[2]:
alpha_from_F=volume_from_F_serie(tmin, tmax, nt, expansion=True, debug=False,\
export_alpha=True)
v_log=np.log(v_list)
if type=='spline':
v_log_fit=UnivariateSpline(t_list, v_log, k=deg, s=smooth)
alpha_fit=v_log_fit.derivative()
alpha_calc=alpha_fit(t_list)
else:
v_log_fit=np.polyfit(t_list, v_log, deg)
alpha_fit=np.polyder(v_log_fit,1)
alpha_calc=np.polyval(alpha_fit, t_list)
t_plot=np.linspace(tmin,tmax, nt*10)
if type=='spline':
v_log_plot=v_log_fit(t_plot)
alpha_plot=alpha_fit(t_plot)
else:
v_log_plot=np.polyval(v_log_fit, t_plot)
alpha_plot=np.polyval(alpha_fit, t_plot)
if fit:
t_trim=np.copy(t_list)
alpha_trim=np.copy(alpha_calc)
if trim > 0.1:
trim_idx=(t_trim < (tmax-trim))
t_trim=t_list[trim_idx]
alpha_trim=alpha_trim[trim_idx]
coef_ini=np.ones(lpow_a)
fit_al,_=curve_fit(alpha_dir_fun,t_trim,alpha_trim,p0=coef_ini)
alpha_fit_plot=list(alpha_dir_fun(it, *fit_al) for it in t_plot)
plt.figure()
plt.plot(t_list, v_log,"k*", label="Actual Log(V) values")
plt.plot(t_plot, v_log_plot, "k-", label="Spline fit")
plt.xlabel("T (K)")
plt.ylabel("Log(V)")
plt.xlim(tmin, tmax)
plt.title("Log(V) vs T")
plt.legend(frameon=False)
plt.show()
plt.figure()
plt.plot(t_plot, alpha_plot, "k-", label="From V(T) fit")
if comp:
if flag[2]:
plt.plot(t_list, alpha_from_F, "ko", label="From Volume_from_F")
if flag[0]:
plt.plot(t_list, al_list, "k*", label="From definition (dir)")
if flag[1]:
plt.plot(t_list, therm_list, "k+", label="From (dP/dT)_V and EoS")
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.xlim(tmin, tmax)
plt.legend(frameon=False)
plt.title("Thermal expansion")
plt.show()
if fit:
plt.figure()
plt.plot(t_list, alpha_calc, "k*", label="Actual values")
plt.plot(t_plot, alpha_fit_plot, "k-", label="Power serie fit")
plt.xlabel("T (K)")
plt.xlim(tmin, tmax)
plt.ylabel("Alpha (K^-1)")
plt.legend(frameon=False)
plt.title("Alpha: power serie fit")
plt.show()
if comp & flag[0] & flag[1] & flag[2]:
fmt="{:4.2e}"
fmt2="{:11.4f}"
fmt3="{:6.1f}"
alpha_calc=list(fmt.format(ia) for ia in alpha_calc)
al_list=list(fmt.format(ia) for ia in al_list)
therm_list=list(fmt.format(ia) for ia in therm_list)
alpha_from_F=list(fmt.format(ia) for ia in alpha_from_F)
v_list=list(fmt2.format(iv) for iv in v_list)
t_list=list(fmt3.format(it) for it in t_list)
serie=(t_list,v_list,alpha_calc,alpha_from_F,al_list,therm_list)
df=pd.DataFrame(serie,\
index=[' Temp',' V ',' (1) ',' (2) ', ' (3) ', ' (4) '])
df=df.T
print("")
print(df.to_string(index=False))
print("")
print("(1) from V(T) fit")
print("(2) from V(T) from F fit")
print("(3) from the definition ('dir' computation)")
print("(4) From (dP/dT)_V and EoS")
else:
fmt="{:4.2e}"
fmt2="{:11.4f}"
fmt3="{:6.1f}"
alpha_calc=list(fmt.format(ia) for ia in alpha_calc)
v_list=list(fmt2.format(iv) for iv in v_list)
t_list=list(fmt3.format(it) for it in t_list)
serie=(t_list,v_list,alpha_calc)
df=pd.DataFrame(serie,\
index=[' Temp',' V ',' Alpha'])
df=df.T
print("")
print(df.to_string(index=False))
if fit and (phase != ''):
print("")
eval(phase).load_alpha(fit_al, power_a)
eval(phase).info()
if fit and (phase == ''):
return fit_al
def alpha_dir_serie(tmin, tmax, nt, pp, fit=True, prt=True):
"""
Thermal expansion in a given range of temperatures. The computation
is done by using the alpha_dir function that, in turn, makes use
of the volume_dir function (EoS-free computation of the volume at
a given pressure and temperature).
Args:
tmin, tmax, nt: minimum, maximum temperatures (K) and number of points
in the T-range
pp: pressure (GPa)
        fit: if True, a power series fit of the alpha values is performed
(see ALPHA keyword in the input file)
prt: if True, a detailed output is printed.
"""
t_list=np.linspace(tmin,tmax,nt)
t_l=np.array([])
alpha_l=np.array([])
for it in t_list:
ial=alpha_dir(it,pp)
if alpha_opt.flag:
alpha_l=np.append(alpha_l,ial)
t_l=np.append(t_l,it)
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_l,alpha_l,p0=coef_ini)
if fit:
t_list=np.linspace(tmin,tmax,nt*4)
alpha_fit_c=alpha_dir_fun(t_list,*alpha_fit)
fig=plt.figure()
ax = fig.add_subplot(111)
ax.plot(t_l,alpha_l,"k*")
if fit:
ax.plot(t_list, alpha_fit_c,"k-")
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.title("Thermal expansion")
plt.show()
if prt:
fmt1="{:5.1f}"
fmt2="{:4.2e}"
t_l=list(fmt1.format(it) for it in t_l)
alpha_l=list(fmt2.format(ia) for ia in alpha_l)
serie=(t_l, alpha_l)
df=pd.DataFrame(serie,index=['Temp.',' Alpha '])
df=df.T
print("\n")
print(df.to_string(index=False))
print("")
volume_ctrl.shift=0.
if fit:
return alpha_fit
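# Usage sketch (illustrative, not part of the original code): EoS-free thermal
# expansion in a T range with alpha_dir_serie. It assumes the input file has
# been read and that the ALPHA keyword defines the powers used for the fit;
# the 300-1000 K range, the 12 points and P = 0 GPa are arbitrary choices.
def _example_alpha_dir_serie():
    # Returns the coefficients of the power series fit of alpha(T)
    return alpha_dir_serie(300., 1000., 12, 0., fit=True, prt=True)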
def alpha_dir_fun(tt,*coef):
"""
Outputs the thermal expansion at a given temperature, from
the fit obtained with the alpha_dir_serie function
"""
alpha=0.
jc=0
while jc<lpow_a:
alpha=alpha+coef[jc]*(tt**power_a[jc])
jc=jc+1
return alpha
def alpha_dir_from_dpdt(tt, pp, prt=False):
"""
Computes thermal expansion, at any temperature and pressure, from the
K*alpha product, by using 'dir' functions only (no equation of state
involved at any step). In particular, the required (dP/dT)_V derivative
is calculated from pressures obtained by the pressure_dir function; the
    volume and the bulk modulus at T, P are obtained by means of the
bulk_modulus_p function (with noeos=True)
Args:
tt: temperature (K)
pp: pressure (GPa)
        prt: if True, alpha, K and V are printed; otherwise unformatted values
are returned (default False)
"""
bulk, vol=bulk_modulus_p(tt, pp, noeos=True, prt=False)
delta=delta_ctrl.get_delta()
nump=delta_ctrl.get_nump()
degree=delta_ctrl.get_degree()
delta=delta/2.
t_list=np.linspace(tt-delta, tt+delta, nump)
pressure_list=np.array([])
for it in t_list:
ip=pressure_dir(it, vol)
pressure_list=np.append(pressure_list, ip)
fit=np.polyfit(t_list, pressure_list, degree)
fitder=np.polyder(fit,1)
k_alpha=np.polyval(fitder, tt)
alpha=k_alpha/bulk
if prt:
print("Thermal expansion: %6.2e (K^-1)" % alpha)
print("Bulk modulus: %6.2f (GPa) " % bulk)
print("Volume: %8.4f (A^3) " % vol)
else:
return alpha, bulk, vol
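# Usage sketch (illustrative): single-point thermal expansion from the K*alpha
# product, with no EoS involved. It assumes a frequency fit (POLY or SPLINE)
# is active; the temperature (500 K) and pressure (1 GPa) are arbitrary.
def _example_alpha_dir_from_dpdt():
    alpha, k, vol = alpha_dir_from_dpdt(500., 1., prt=False)
    print("alpha: %6.2e K^-1, K: %6.2f GPa, V: %8.4f A^3" % (alpha, k, vol))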
def alpha_dir_from_dpdt_serie(tmin, tmax, nt=12, pp=0, fit=False, phase='',
save=False, title=True, tex=False):
"""
Thermal expansion in a T-range. The function makes use of the
alpha_dir_from_dpdt function.
Args:
tmin, tmax: minimum and maximum temperature (in K)
nt: number of points in the T-range (default 12)
pp: pressure (GPa)
fit: if True, a power series fit is performed
phase: if not equal to '', and fit is True, the coefficients
of the power series fit are uploaded in the internal database
(default '')
save: if True, a figure is saved in a file (default False)
tex: if True, latex format is used for the figure (default False)
title: if False, the title printing is suppressed (default True)
Note:
If a phase is specified and fit is True, use the export function to
upload the parameters of the power series in the database file
Example:
>>> alpha_dir_from_dpdt_serie(100, 500, fit=True, phase='py')
>>> export('py')
"""
t_list=np.linspace(tmin, tmax, nt)
alpha_list=np.array([])
for it in t_list:
ia,_,_=alpha_dir_from_dpdt(it, pp, prt=False)
alpha_list=np.append(alpha_list, ia)
if fit:
if flag_alpha==False:
print("\nWarning: no polynomium defined for fitting alpha's")
print("Use ALPHA keyword in input file")
return None
coef_ini=np.ones(lpow_a)
alpha_fit, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
if fit:
t_plot=np.linspace(tmin,tmax,nt*4)
alpha_fit_plot=alpha_dir_fun(t_plot,*alpha_fit)
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
plt.figure()
tit_text="Thermal expansion at pressure "+str(pp)+" GPa"
plt.plot(t_list, alpha_list, "k*", label="Actual values")
if fit:
plt.plot(t_plot, alpha_fit_plot, "k-", label="Power series fit")
if latex.flag:
plt.xlabel("T (K)", fontsize=fontsize)
plt.ylabel(r'$\alpha$ (K$^{-1}$)', fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
if fit:
plt.legend(frameon=False, prop={'size': fontsize})
if title:
plt.suptitle(tit_text, fontsize=fontsize)
else:
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
if fit:
plt.legend(frameon=False)
if title:
plt.title(tit_text)
if save:
name=path+'/'+'alpha_from_dpdt.'+ext
plt.savefig(name, dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
if fit and (phase != ''):
print("")
eval(phase).load_alpha(alpha_fit, power_a)
eval(phase).info()
elif fit:
return alpha_fit
def cp_dir(tt,pp, prt=False):
"""
Computes the specific heat at constant pressure by using 'dir' functions.
    In particular, at the given T and P, the thermal expansion, the bulk modulus
    and the equilibrium volume are obtained from the alpha_dir_from_dpdt function
    (which, in turn, relies on the bulk_modulus_p function with the option
    noeos set to True), whereas the entropy and the specific heat at constant
    volume are computed by the entropy_dir_v function.
Args:
tt: temperature (K)
pp: pressure (GPa)
prt: if True a detailed output is printed
"""
if disp.flag:
if not disp.thermo_vt_flag:
disp.thermo_vt_on()
alpha, k, vol=alpha_dir_from_dpdt(tt,pp, prt=False)
ent,cv=entropy_dir_v(tt, vol)
cp=cv+vol*(avo*1e-30/zu)*tt*k*1e9*alpha**2
if prt:
print("Cp: %6.2f, Cv: %6.2f, S %6.2f (J/K mol)" % (cp, cv, ent))
print("K: %6.2f (GPa), Alpha: %6.2e (K^-1), Volume: %8.4f (A^3)" % (k, alpha, vol))
else:
return cp
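# Usage sketch (illustrative): specific heat at constant pressure from 'dir'
# functions only. It assumes the usual setup (input file read, frequency fit
# active); the temperature (300 K) and pressure (0 GPa) are arbitrary.
def _example_cp_dir():
    # With prt=False a single Cp value (J/mol K) is returned
    return cp_dir(300., 0., prt=False)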
def cp_dir_serie(tmin, tmax, nt, pp=0):
t_list=np.linspace(tmin, tmax, nt)
cp_list=np.array([cp_dir(it, pp) for it in t_list])
plt.figure()
plt.plot(t_list, cp_list, "k-")
plt.show()
def cp(tt,pp,plot=False,prt=False,dul=False,**kwargs):
"""
Specific heat at constant pressure (Cp) and entropy (S)
Args:
tt: temperature
pp: pressure
fix (optional): optimizes Kp if fix=0, or keeps Kp
fixed if fix=Kp > 0
plot (optional): checks against numerical issues
(experts only)
prt (optional): prints formatted results
Note:
Cp = Cv + V*T*K*alpha^2
Cp, Cv (J/mol K), Cp/Cv, alpha (K^-1), K=K0+K'P (GPa)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
pr_e=False
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
[ent,cv]=entropy_v(tt,vol,plot,pr_e,fix=fix_value)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot,fix=fix_value)
else:
vol=new_volume(tt,pp)
[ent,cv]=entropy_v(tt,vol,plot,pr_e)
[alpha,k,pressure]=thermal_exp_v(tt,vol,plot)
cp=cv+vol*(avo*1e-30/zu)*tt*k*1e9*alpha**2
if prt:
print("\nCp: %6.2f, Cv: %6.2f, Cp/Cv: %7.5f, alpha: %6.3e, K: %6.2f\n"\
% (cp, cv, cp/cv, alpha, k))
return None
elif dul == False:
return cp[0], ent
else:
return cp[0],ent,cp/cv
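# Usage sketch (illustrative): Cp and S at 300 K and 0 GPa from the EoS-based
# route, keeping Kp fixed through the 'fix' keyword as advised in the notes
# above; the value 4.5 is an arbitrary placeholder for Kp.
def _example_cp():
    cp_val, ent_val = cp(300., 0., fix=4.5)
    print("Cp: %6.2f J/(mol K), S: %6.2f J/(mol K)" % (cp_val, ent_val))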
def cp_fun(tt,*coef):
"""
    Computes the specific heat at constant pressure, at a given temperature,
from the fit Cp(T) performed with the cp_serie function
"""
cp=0.
jc=0
while jc<lpow:
cp=cp+coef[jc]*(tt**power[jc])
jc=jc+1
return cp
def dcp_dp(tt,pp,**kwargs):
"""
Derivative of Cp with respect to P (at T constant)
Args:
tt: temperature
pp: pressure
fix (optional): fixed Kp value; if fix=0., Kp is
optimized
Notes:
The derivative is evaluated from the relation
(dCp/dP)_T = -VT[alpha^2 + (d alpha/dT)_P]
It is **strongly** advised to keep Kp fixed (Kp=fix)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
dalpha=dalpha_dt(tt,pp,fix=fix_value)
alpha,k,pres=thermal_exp_v(tt,vol,fix=fix_value,plot=False)
else:
vol=new_volume(tt,pp)
dalpha=dalpha_dt(tt,pp)
alpha,k,pres=thermal_exp_v(tt,vol,plot=False)
dcp=-1*(vol*avo*1e-21/zu)*tt*(alpha**2+dalpha)
print("\n(dCp/dP)_T: %5.2f J/(mol K GPa) " % dcp)
print("(dAlpha/dT)_P: %6.2e K^-2 " % dalpha)
def compare_exp(graph_exp=True, unit='j' ,save="",dpi=300,**kwargs):
"""
Compare experimental with computed data for Cp and S;
makes a plot of the data
Args:
graph_exp: if True, a plot of Cp vs T is produced
unit: unit of measure of experimental data; allowed values are 'j' or
'cal' (default 'j')
save: file name to save the plot (no file written by default)
dpi: resolution of the image (if 'save' is given)
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if (unit == 'j') or (unit == 'J'):
conv_f=1.
elif (unit == 'cal') or (unit == 'CAL'):
conv_f=4.184
else:
print("Warning: unit %s is unknow. J is assumed" % unit)
conv_f=1.
if not flag_exp:
print("Warning: experimental data file not found")
return
t_list=data_cp_exp[:,0]
cp_exp_list=data_cp_exp[:,1]*conv_f
s_exp_list=data_cp_exp[:,2]*conv_f
cp_calc=[]
s_calc=[]
for ti in t_list:
if fixpar:
cp_i, ent_i=cp(ti,0.,fix=fix_value,plot=False,prt=False)
else:
cp_i, ent_i=cp(ti,0.,plot=False,prt=False)
cp_calc=np.append(cp_calc,cp_i)
s_calc=np.append(s_calc, ent_i)
cp_diff=cp_calc-cp_exp_list
s_diff=s_calc-s_exp_list
exp_serie=(t_list,cp_exp_list,cp_calc,cp_diff,s_exp_list,s_calc,\
s_diff)
df=pd.DataFrame(exp_serie,\
index=['Temp','Cp exp','Cp calc','Del Cp','S exp','S calc','Del S'])
df=df.T
df2=df.round(2)
print("")
print(df2.to_string(index=False))
dcp=abs(df["Del Cp"].values)
ds=abs(df["Del S"].values)
mean_error_cp=dcp.mean()
mean_error_s=ds.mean()
max_error_cp=dcp.max()
max_error_s=ds.max()
print("\nAverage error on Cp: %.2f; maximum error: %.2f" % \
(mean_error_cp, max_error_cp))
print("Average error on S: %.2f; maximum error: %.2f" % \
(mean_error_s, max_error_s))
if graph_exp:
if not flag_cp:
print("\nWarning: no polynomium defined for fitting Cp's" )
print("No graphical comparison made")
return
tmin=np.min(t_list)
tmax=np.max(t_list)
npoint=pr.ntemp_fit_compare
if fixpar:
cp_fit=cp_serie(tmin,tmax,npoint,0.,fix=fix_value, plot=False,\
prt=False,fit=True,graph=False)
else:
cp_fit=cp_serie(tmin,tmax,npoint,0., plot=False,\
prt=False,fit=True,graph=False)
ntemp=pr.ntemp_plot_compare
t_graph=np.linspace(tmin,tmax,ntemp)
cp_graph=[]
for it in t_graph:
cp_graph_i=cp_fun(it,*cp_fit)
cp_graph=np.append(cp_graph,cp_graph_i)
plt.figure(11)
plt.plot(t_graph,cp_graph,"k-", label='Calc')
plt.plot(t_list,cp_exp_list,"k*", label='Exp')
plt.xlabel("T(K)")
plt.ylabel("Cp (J/mol K)")
plt.title("Experimental vs. calculated Cp(T)")
plt.legend()
if save != '':
plt.savefig(fname=path+'/'+save, dpi=dpi)
plt.show()
if not flag_warning.value:
print("Warning: issue on volume repeated %d times" % \
flag_warning.jwar)
flag_warning.reset()
flag_warning.value=True
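# Usage sketch (illustrative): comparison with experimental Cp and S data. It
# assumes an experimental data file was provided in the input (so that
# flag_exp is True) and that the CP keyword defines the fitting polynomial;
# here the experimental values are taken to be in calories and Kp is fixed
# at an arbitrary value.
def _example_compare_exp():
    compare_exp(graph_exp=True, unit='cal', fix=4.5)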
def cp_serie(tini,tfin,points,pp, HTlim=0., model=1, g_deg=1, plot=False,prt=False, \
fit=True, t_max=0., graph=True, save='', tex=False, title=True, **kwargs):
"""
Outputs a list of Cp values (J/mol K) in a given temperature range,
at a fixed pressure
Args:
tini: minimum temperature (K)
tfin: maximum temperature
points: number of points in the T range
pp: pressure (GPa)
HTlim: if HTlim > 0, the Dulong-Petit limit (DP) for Cp is imposed at a
high T value (HTlim); the procedure is performed by computing
Cv in the [tini, tfin] T range and by fitting the Cv curve by the
Einstein's model after the DP limit is added for T=HTlim;
               The gamma value (Cp/Cv) at T > tfin is linearly extrapolated from the
gamma(T) fit obtained in the [tini,tfin] range. For T > tfin
(and up to HTlim) Cp is computed as the product of Cv (from the
Einstein's model) and the extrapolated gamma.
t_max: maximum temperature at which the power series Cp(T) fit is
done. If t_max=0. (default), tmax=HTlim. The parameter is
relevant only if HTlim is not zero.
fix (optional): keeps Kp fixed at the value Kp=fix if
fix > 0.1
        prt (optional): prints a table of Cp(T) values if prt=True (default False)
fit (optional): fits the Cp(T) values with a polynomial function
whose powers must be specified in the input.txt
file
        graph (optional): makes a plot of the Cp(T) series and its fit
if graph=True (default)
save (optional, string): saves the plot image in the file name
specified
dpi (optional): dpi resolution of the saved image
Note:
to output the coefficients of the fit, prt must be set to
False
The optional argument plot (default: False) is for checking
possible numerical issues
It is advised to keep Kp fixed during the computation
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
dlflag=False
if HTlim > 0.:
dlflag=True
print("\n*** High temperature Cp estimation from the Dulong-Petit limit\n")
print(" T limit: %5.2f" % HTlim)
if t_max < 0.001:
t_max=HTlim
t_extra=np.linspace(tfin+20,t_max,16)
cp_lim=np.array([])
if model==1:
ein_t=einstein_t(tini,tfin,12,HTlim,dul=True)
else:
ein_t=einstein_t(tini,tfin,12,HTlim,dul=True,model=2)
pol_gamma=gamma_estim(tini,tfin,npoint=12,g_deg=g_deg)
print("Gamma estimation (extrapolation from lower T values)\n")
for ix in t_extra:
if model==1:
cv_ix=einstein_fun(ix,ein_t[0])
else:
cv_ix=einstein_2_fun(ix,ein_t[0],ein_t[1])
gam_ix=gamma_calc(ix,pol_gamma)
cp_ix=cv_ix*gam_ix
print("T: %8.2f Cv: %8.2f gamma: %5.3f Cp: %8.2f" % (ix, cv_ix, gam_ix, cp_ix))
cp_lim=np.append(cp_lim,cp_ix)
prt_c=False
t_serie=np.linspace(tini,tfin,points)
cp_serie=[]
for ict in t_serie:
if fixpar:
cpi, ent_i=cp(ict,pp,plot,prt_c,fix=fix_value)
else:
cpi, ent_i=cp(ict,pp,plot,prt_c)
cp_serie=np.append(cp_serie,cpi)
if dlflag:
cp_serie=np.append(cp_serie,cp_lim)
t_serie=np.append(t_serie,t_extra)
if prt:
serie=(t_serie, cp_serie)
df=pd.DataFrame(serie,index=['Temp.','Cp'])
df=df.T
print("\n")
df2=df.round(2)
print(df2.to_string(index=False))
if graph:
dpi=80
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ticksize=latex.get_tsize()
plt.figure(6)
plt.plot(t_serie,cp_serie,"k*")
if fit:
if not flag_cp:
print("\nWarning: no polynomium defined for fitting Cp's")
print("Use CP keyword in input file")
return None
coef_ini=np.ones(lpow)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cp_fit, cp_cov=curve_fit(cp_fun,t_serie,cp_serie,p0=coef_ini)
if dlflag:
tfin=t_max
t_value=np.linspace(tini,tfin,pr.ntemp_plot_cp)
cp_value=[]
for ict in t_value:
cpi=cp_fun(ict,*cp_fit)
cp_value=np.append(cp_value,cpi)
if graph:
plt.plot(t_value,cp_value,"k-")
if graph:
if latex.flag:
plt.xlabel("T (K)", fontsize=fontsize)
plt.ylabel("$C_P$ (J/mol K)", fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
plt.xlabel("T(K)")
plt.ylabel("Cp (J/mol K)")
if title:
plt.title("Specific heat as a function of T")
if save !='':
plt.savefig(fname=path+'/'+save,dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
if prt:
return None
elif fit:
return cp_fit
else:
return None
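# Usage sketch (illustrative): Cp(T) in the 300-1000 K range at P = 0 GPa,
# with the Dulong-Petit constraint imposed at 2000 K (all arbitrary values).
# It assumes the CP keyword defines the powers of the fitting polynomial.
def _example_cp_serie():
    # With prt=False and fit=True (default) the fit coefficients are returned
    return cp_serie(300., 1000., 16, 0., HTlim=2000., prt=False, fix=4.5)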
def gamma_estim(tini,tfin,npoint=12,g_deg=2):
t_list=np.linspace(tini,tfin,npoint)
gamma_list=np.array([])
for it in t_list:
dum1,dum2,gamma=cp(it,0,dul=True)
gam=gamma[0]
gamma_list=np.append(gamma_list,gam)
pol=np.polyfit(t_list,gamma_list,g_deg)
gamma_fit.upload(g_deg,pol)
gamma_calc_list=list(gamma_calc(it, pol) for it in t_list)
plt.figure()
plt.plot(t_list,gamma_list,"*")
plt.plot(t_list,gamma_calc_list,"k-")
plt.xlabel("T(K)")
plt.ylabel("Gamma")
plt.title("Gamma (Cp/Cv) as a function of T")
plt.show()
return pol
def gamma_calc(tt,pol):
return np.polyval(pol,tt)
def bulk_serie(tini,tfin,npoint,fit=True,degree=2,update=False,\
save='', tex=False, title=True, **kwargs):
"""
Computes the bulk modulus K0 as a function of temperature in a given
T range
Args:
tini: minimum temperature
tfin: maximum temperature
npoint: number of points in the T range
fix (optional): keeps Kp constant in the calculation of K0
if fix=Kp > 0.1. If fix=0. Kp is optimized
at every different temperature.
fit (optional): makes a polynomial fit of the K0(T) values
degree (optional): degree of polynomial for the fitting
save (optional, string): file name of the saved plot
dpi (optional, integer): dpi resolution of the saved image
Note:
the fix argument overrides the value of Kp possibly set
by the set_fix function
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
t_serie=np.linspace(tini,tfin,npoint)
b_serie=[]
for ict in t_serie:
if fixpar:
[free_energy, pterm, pcov_term]=bmx_tem(ict,fix=fix_value)
else:
[free_energy, pterm, pcov_term]=bmx_tem(ict)
k0t=pterm[1]*conv/1e-21
b_serie=np.append(b_serie,k0t)
dpi=80
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ticksize=latex.get_tsize()
plt.figure(7)
plt.plot(t_serie,b_serie,"k*")
if title:
plt.title("Bulk modulus as a function of T")
if latex.flag:
plt.xlabel("T (K)", fontsize=fontsize)
plt.ylabel("$K_0$ (GPa)", fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
plt.xlabel("T (K)")
plt.ylabel("K0 (GPa)")
if fit:
fit_b=np.polyfit(t_serie,b_serie,degree)
b_fit=np.polyval(fit_b,t_serie)
plt.plot(t_serie,b_fit,"k-")
print("\nResults from the fit (from high to low order)")
np.set_printoptions(formatter={'float': '{: 4.2e}'.format})
print(fit_b)
np.set_printoptions(formatter=None)
if save !='':
plt.savefig(fname=path+'/'+save,dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
if update:
return fit_b
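# Usage sketch (illustrative): K0 as a function of T in the 300-800 K range,
# fitted by a second degree polynomial; with update=True the fit coefficients
# (highest power first) are returned. Kp is kept fixed at an arbitrary value.
def _example_bulk_serie():
    return bulk_serie(300., 800., 10, fit=True, degree=2, update=True, fix=4.5)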
def free_v(tt,vol,**kwargs):
"""
Helmholtz free energy at a given temperature and volume
Unit: a.u.
Args:
tt: temperature (K)
vol: cell volume (A^3)
Keyword Args:
fix: if fix is provided, it controls (and overrides the setting
possibly chosen by set_fix) the optimization of kp in BM3;
if fix > 0.1, kp = fix and it is not optimized.
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
[ff,pterm,pcov]=bmx_tem(tt,fix=fix_value)
else:
[ff,pterm,pcov]=bmx_tem(tt)
return v_bm3(vol,*pterm)
def gibbs_p(tt,pp,**kwargs):
l_arg=list(kwargs.items())
fixpar=False
v0_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
elif 'v0' == karg_i[0]:
v0_flag=True
v0_value=karg_i[1]
if fixpar:
vol=new_volume(tt,pp,fix=fix_value)
f_energy=free_v(tt,vol[0],fix=fix_value)
else:
vol=new_volume(tt,pp)
f_energy=free_v(tt,vol[0])
if disp.flag:
        if not disp.thermo_vt_flag:
f_disp=disp.free_func(tt)+v_bm3(vol[0],*info.popt)*disp.molt
else:
f_disp=disp.free_vt(tt,vol)+v_bm3(vol[0],*info.popt)*disp.molt
f_energy=(f_energy+f_disp)/(disp.molt+1)
fact=1.
if v0_flag:
if fixpar:
v0_qm=new_volume(298.15,0.0001,fix=fix_value)
else:
v0_qm=new_volume(298.15,0.0001)
fact=((1e25*zu*v0_value)/avo)/v0_qm[0]
vol=vol*fact
gibbs_pt=(avo/zu)*(f_energy*conv) + (avo/zu)*pp*vol*1e-21
return gibbs_pt
def gibbs_tp(tt,pp,**kwargs):
l_arg=list(kwargs.items())
fixpar=False
v0_flag=False
g0_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
elif 'g0' == karg_i[0]:
g0_flag=True
gexp=karg_i[1]
elif 'v0' == karg_i[0]:
v0_flag=True
v0_value=karg_i[1]
if v0_flag:
if fixpar:
gref=gibbs_p(298.15,0.,fix=fix_value,v0=v0_value)
else:
gref=gibbs_p(298.15, 0.,v0=v0_value)
else:
if fixpar:
gref=gibbs_p(298.15,0.,fix=fix_value)
else:
gref=gibbs_p(298.15, 0.)
if g0_flag:
gref=gref-gexp
if v0_flag:
if fixpar:
gtp=gibbs_p(tt,pp,fix=fix_value,v0=v0_value)
else:
gtp=gibbs_p(tt,pp,v0=v0_value)
else:
if fixpar:
gtp=gibbs_p(tt,pp,fix=fix_value)
else:
gtp=gibbs_p(tt,pp)
return gtp-gref
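# Usage sketch (illustrative): Gibbs free energy at 800 K and 3 GPa referred
# to the standard state (298.15 K, 0 GPa), with Kp fixed at an arbitrary
# value; the g0 and v0 keywords could be added to anchor the result to
# experimental reference data, as described for gibbs_serie_p below.
def _example_gibbs_tp():
    return gibbs_tp(800., 3., fix=4.5)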
def gibbs_serie_p(pini, pfin, npres, tt, prt=True, **kwargs):
"""
Gibbs free energy in a pressure interval, at a given temperature
Args:
pini: minimum pressure (GPa)
pfin: maximum pressure (GPa)
npres: number of points in the interval
tt: temperature (K)
prt (optional): if True, prints a numerical table G(P)
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
g0: Experimental G at the reference T and P (J/mol)
v0: Experimental V at the reference T and P (J/mol bar)
Returns:
Gibbs free energy in J/mol
Note:
the free energy is given with reference to the energy
at the standard state (298.15 K; 0 GPa)
"""
l_arg=list(kwargs.items())
fixpar=False
g0_flag=False
v0_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
elif 'g0' == karg_i[0]:
g0_flag=True
gexp=karg_i[1]
elif 'v0'==karg_i[0]:
v0_flag=True
v0_value=karg_i[1]
if fixpar:
if v0_flag:
gref=gibbs_p(298.15,0.0001,fix=fix_value,v0=v0_value)
else:
gref=gibbs_p(298.15,0.0001,fix=fix_value)
else:
if v0_flag:
gref=gibbs_p(298.15,0.0001,v0=v0_value)
else:
gref=gibbs_p(298.15,0.0001)
if g0_flag:
gref=gref-gexp
p_list=np.linspace(pini,pfin, npres)
g_list=np.array([])
if not v0_flag:
if fixpar:
for pi in p_list:
gi=gibbs_p(tt,pi,fix=fix_value)
g_list=np.append(g_list,gi)
else:
for pi in p_list:
gi=gibbs_p(tt,pi)
g_list=np.append(g_list,gi)
else:
if fixpar:
for pi in p_list:
gi=gibbs_p(tt,pi,fix=fix_value,v0=v0_value)
g_list=np.append(g_list,gi)
else:
for pi in p_list:
gi=gibbs_p(tt,pi,v0=v0_value)
g_list=np.append(g_list,gi)
g_list=g_list-gref
maxg=max(g_list)*0.99
ming=min(g_list)*1.01
fig=plt.figure()
ax=fig.add_subplot(111)
ax.title.set_text("Gibbs free energy vs P\n")
ax.plot(p_list,g_list,"k-")
ax.axis([pini,pfin,ming,maxg])
ax.yaxis.set_major_locator(plt.MaxNLocator(6))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
ax.set_xlabel("Pressure (GPa)")
ax.set_ylabel("G (J/mol)")
plt.show()
if prt:
print("\nPress (GPa) Gibbs energy (J/mol)\n" )
for ip,ig in zip(p_list,g_list):
print(" %5.2f %8.1f" % (ip, ig))
def gibbs_serie_t(tini, tfin, ntemp, pp, prt=True, **kwargs):
"""
Gibbs free energy in a temperature interval, at a given pressure
Args:
tini: minimum temperature (K)
tfin: maximum temperature (K)
ntemp: number of points in the interval
pp: pressure (GPa)
        prt (optional): if True, prints a numerical table G(T)
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
g0: Experimental G at the reference T and P (J/mol)
v0: Experimental V at the reference T and P (J/mol bar)
Returns:
Gibbs free energy in J/mol
Note:
The free energy is given with reference to the energy
at the standard state (298.15 K; 0 GPa)
"""
l_arg=list(kwargs.items())
fixpar=False
g0_flag=False
v0_flag=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
elif 'g0' == karg_i[0]:
g0_flag=True
gexp=karg_i[1]
elif 'v0'==karg_i[0]:
v0_flag=True
v0_value=karg_i[1]
if fixpar:
if v0_flag:
gref=gibbs_p(298.15,0.0001,fix=fix_value,v0=v0_value)
else:
gref=gibbs_p(298.15,0.0001,fix=fix_value)
else:
if v0_flag:
gref=gibbs_p(298.15,0.0001,v0=v0_value)
else:
gref=gibbs_p(298.15,0.0001)
if g0_flag:
gref=gref-gexp
t_list=np.linspace(tini,tfin, ntemp)
g_list=np.array([])
if not v0_flag:
if fixpar:
for ti in t_list:
gi=gibbs_p(ti,pp,fix=fix_value)
g_list=np.append(g_list,gi)
else:
for ti in t_list:
gi=gibbs_p(ti,pp)
g_list=np.append(g_list,gi)
else:
if fixpar:
for ti in t_list:
gi=gibbs_p(ti,pp,fix=fix_value,v0=v0_value)
g_list=np.append(g_list,gi)
else:
for ti in t_list:
gi=gibbs_p(ti,pp, v0=v0_value)
g_list=np.append(g_list,gi)
g_list=g_list-gref
maxg=max(g_list)*0.999
ming=min(g_list)*1.001
fig=plt.figure()
ax=fig.add_subplot(111)
ax.title.set_text("Gibbs free energy vs T\n")
ax.plot(t_list,g_list,"k-")
ax.axis([tini,tfin,ming,maxg])
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
ax.set_xlabel("Temperature (K)")
ax.set_ylabel("G (J/mol)")
plt.show()
if prt:
print("\nTemp (K) Gibbs energy (J/mol)\n" )
for it,ig in zip(t_list,g_list):
print(" %6.2f %8.1f" % (it, ig))
def eos_temp(tt,prt=True,update=False,kp_only=False, save=False, \
tex=False, title=True):
"""
Outputs the EoS (BM3) at a given temperature
Args:
tt: temperature (K)
        prt (optional): if prt=True (default) plots the F(V) function
                        and prints a list of volume/pressure values at the
                        chosen temperature
Note:
In the optimization, Kp can be kept fixed to the value
set by the set_fix function
"""
volb=data_vol_freq
if flag_poly.flag:
volb=flag_poly.fit_vol
elif flag_spline.flag:
volb=flag_spline.fit_vol
[free_energy, pterm,pcov_term]=bmx_tem(tt)
k_gpa=pterm[1]*conv/1e-21
kp=pterm[2]
v0=pterm[0]
info.temp=tt
info.k0=k_gpa
info.kp=kp
info.v0=v0
if kp_only:
return
if bm4.flag:
ks=pterm[3]
perr_t=np.sqrt(np.diag(pcov_term))
if f_fix.flag:
perr_t[2]=0.00
ke=perr_t[1]*conv/1e-21
if bm4.flag:
print("\n ** BM4 fit **")
else:
print("\n ** BM3 fit **")
print("\nEoS at the temperature of %5.2f K" % tt)
print("\nBulk Modulus: %5.2f (%4.2f) GPa" % (k_gpa, ke))
print("Kp: %5.2f (%4.2f)" % (kp, perr_t[2]))
if bm4.flag:
print("Kpp %5.2f (%4.2f)" % (ks, perr_t[3]))
print("V0: %7.4f (%5.3f) A^3\n" % (v0, perr_t[0]))
fix_status()
fit_status()
if update:
return v0, k_gpa, kp
if not prt:
print("\n")
return
vol_min=np.min(volb)
vol_max=np.max(volb)
nvol=pr.nvol_eos
vol_range=np.linspace(vol_min,vol_max,nvol)
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
fig, ax=plt.subplots()
if title:
plt.title("F(V) curve at T= %5.2f K" % tt)
ax.plot(volb, free_energy, "k*")
if bm4.flag:
plt.plot(vol_range, bm4.energy(vol_range, *pterm),'k-')
else:
plt.plot(vol_range, v_bm3(vol_range, *pterm), 'k-')
if latex.flag:
plt.xlabel("V (\AA$^3$)", fontsize=fontsize)
plt.ylabel("F (a.u.)", fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
plt.xlabel("V (A^3)")
plt.ylabel("F (a.u.)")
ax.ticklabel_format(axis='y', style='sci', useOffset=False)
if save:
filename=path+'/eos' + '.' + ext
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
print("\nVolume-Pressure list at %5.2f K\n" % tt)
for vp_i in volb:
if bm4.flag:
pv_i=bm4.pressure(vp_i,v0,k_gpa,kp,ks)
else:
pv_i=bm3(vp_i,v0,k_gpa,kp)
print(" %5.3f %5.2f" % (vp_i, pv_i))
def eosfit_dir(file_name, unit=False):
"""
Writes a PVT file to be used with EosFit
Temperature data are in the temperature_list list
Args:
file_name: name of the output file
        unit: if unit=True, volumes are converted to cm^3/mol
Example:
>>> eosfit_dir("myfile.dat")
enclose the file name in quotes.
Note:
The computation of P(V,T) is performed without reference to
any EoS, as pressure at (V,T) is computed as numerical
derivative of F with respect to V at constant temperature.
"""
file_name=path+'/'+file_name
if not flag_eos:
print("\nWarning: set of temperatures for EoSFit output not defined")
print("Use TEMP keyword in input file")
return
if (not flag_poly.flag) and (not flag_spline.flag):
war1="Warning: frequency fit is off; use of poly or spline fits"
war2=" is mandatory for bulk_dir"
print(war1+war2)
return
flag_volume_max.value=False
if flag_poly.flag:
volb=flag_poly.fit_vol
elif flag_spline.flag:
volb=flag_spline.fit_vol
volmin=min(volb)
eos_data=np.array([])
for ti in temperature_list:
volmax=volume_dir(ti,0.)
if flag_volume_max.value:
print("Warning: volume exceeds maximum set in volume_range")
print("Temperature %4.2f, Volume %8.4f" % (ti, volmax))
continue
volnew=np.linspace(volmin,volmax,16)
for vi in volnew:
pi=pressure_dir(ti,vi)
if supercell.flag:
vi=vi/supercell.number
if unit:
vi=vi*1e-24*avo/zu
if pi >=-0.02:
pvt=np.array([pi, vi, ti])
eos_data=np.append(eos_data,pvt)
    iraw=int(eos_data.size/3)
eosdata=np.reshape(eos_data,(iraw,3))
string='TITLE Input prepared with Numpy script\nformat 1 P V T'
np.savetxt(file_name, eosdata, fmt="%5.3f %12.4f %8.2f", \
header=string, comments="")
print("\nEoSFit file %s saved" % file_name)
def eosfit(file_name,**kwargs):
"""
Writes a PVT file to be used with EosFit
Temperature data are in the temperature_list list
Args:
file_name: name of the output file
Keyword Args:
if the optional argument 'fix' is larger than 0.1, Kp=fix is fixed
Example:
>>> eosfit("myfile.dat")
enclose the file name in quotes
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if not flag_eos:
print("\nWarning: set of temperatures for EoSFit output not defined")
print("Use TEMP keyword in input file")
return None
volb=data_vol_freq_orig
if flag_poly.flag:
volb=flag_poly.fit_vol
elif flag_spline.flag:
volb=flag_spline.fit_vol
file_name=path+'/'+file_name
maxvol=max(volb)
npoint=volb.size
eos_data=np.array([])
for itt in temperature_list:
volt=new_volume(itt,0.)
if volt >=maxvol:
print("Temperature %4.2f not considered" % itt)
print("Equilibrium volume (%6.3f) exceeds the volume range"\
% volt)
break
volb_t=np.linspace(min(volb),volt,npoint)
if fixpar:
[ff,pterm,pcov]=bmx_tem(itt,fix=fix_value)
else:
[ff,pterm,pcov]=bmx_tem(itt)
k_gpa=pterm[1]*conv/1e-21
kp=pterm[2]
v0=pterm[0]
if bm4.flag:
ks=pterm[3]
for vi in volb_t:
if bm4.flag:
pv_i=bm4.pressure(vi,v0,k_gpa,kp,ks)
else:
pv_i=bm3(vi,v0,k_gpa,kp)
if pv_i>=0.:
pvt=np.array([pv_i, vi, itt])
eos_data=np.append(eos_data,pvt)
    iraw=int(eos_data.size/3)
eosdata=np.reshape(eos_data,(iraw,3))
string='TITLE Input prepared with Numpy script\nformat 1 P V T'
np.savetxt(file_name, eosdata, fmt="%5.2f %12.4f %8.2f", \
header=string, comments="")
print("EoSFit file %s saved" % file_name)
def new_volume(tt,pr,**kwargs):
"""
Computes the volume (A^3) at a given pressure and temperature
Args:
tt: temperature (K)
        pr: pressure (GPa)
Keyword Args:
fix (optional): used to keep Kp fixed
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
[free_energy, pterm,pcov_term]=bmx_tem(tt,fix=fix_value)
else:
[free_energy, pterm,pcov_term]=bmx_tem(tt)
k0=pterm[1]*conv/1e-21
kp=pterm[2]
v0=pterm[0]
if bm4.flag:
p_fun=lambda vv: ((bm4.pressure(vv,pterm[0],pterm[1],pterm[2],pterm[3])*\
conv/1e-21)-pr)**2
else:
p_fun=lambda vv: ((3*k0/2)*((v0/vv)**(7/3)-(v0/vv)**(5/3))* \
(1+(3/4)*(kp-4)*((v0/vv)**(2/3)-1))-pr)**2
vol=scipy.optimize.minimize(p_fun,v0,tol=1e-18)
return vol.x
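# Usage sketch (illustrative): equilibrium cell volume at 300 K and 1 GPa from
# the fitted EoS; note that a one-element array is returned, so the scalar
# value is vol[0]. The fix value is an arbitrary placeholder for Kp.
def _example_new_volume():
    vol = new_volume(300., 1., fix=4.5)
    print("Volume: %8.4f A^3" % vol[0])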
def freq_v_fit(ifr):
"""
    Computes the coefficients of the polynomial fitting the frequency
    of the "ifr" mode with respect to volume; the degree of the fitting
    polynomial ("dg") is specified in the input.txt file, under
the keyword FITVOL, or it can be set by the set_poly function.
"""
ifl=np.array([])
dg=flag_poly.degree
new_vol_range=np.array([])
vmin, vmax=min(flag_poly.fit_vol), max(flag_poly.fit_vol)
for ivol in int_set:
if (data_vol_freq_orig[ivol] >= vmin) and (data_vol_freq_orig[ivol] <= vmax):
ifl=np.append(ifl,lo.data_freq[ifr,ivol+1])
new_vol_range=np.append(new_vol_range, data_vol_freq_orig[ivol])
pol=np.polyfit(new_vol_range,ifl,dg)
return pol
def freq_stack_fit():
"""
    Accumulates all the coefficients of the polynomials fitting the
frequencies of all the modes, computed by freq_v_fit.
Outputs the array "pol_stack" used by freq_v_fun
"""
pol_stack=np.array([])
dg=flag_poly.degree
for ifr in int_mode:
pol_i=freq_v_fit(ifr)
pol_is=np.array(pol_i)
pol_stack=np.append(pol_stack, pol_is)
pol_stack=pol_stack.reshape(int_mode.size,dg+1)
return pol_stack
def freq_v_fun(ifr,vv):
"""
Outputs the frequency of the "ifr" mode as a function of volume
by using the polynomial fit computed with freq_v_fit
"""
if not flag_poly.flag_stack:
print("Polynomial stack not present; use set_poly to create it")
return
pol_stack=flag_poly.pol_stack
pol=pol_stack[ifr,:]
return np.polyval(pol,vv)
# Spline section
def freq_spline(ifr):
ifl=np.array([])
degree=flag_spline.degree
smooth=flag_spline.smooth
new_vol_range=np.array([])
vmin, vmax=min(flag_spline.fit_vol), max(flag_spline.fit_vol)
for ivol in int_set:
if (data_vol_freq_orig[ivol] >= vmin) and (data_vol_freq_orig[ivol] <= vmax):
ifl=np.append(ifl,lo.data_freq[ifr,ivol+1])
new_vol_range=np.append(new_vol_range, data_vol_freq_orig[ivol])
f_uni=UnivariateSpline(new_vol_range,ifl,k=degree,s=smooth)
return f_uni
def freq_stack_spline():
pol_stack=np.array([])
for ifr in int_mode:
pol_i=freq_spline(ifr)
pol_stack=np.append(pol_stack, pol_i)
pol_stack=np.array(pol_stack)
return pol_stack
def freq_spline_v(ifr,vv):
if not flag_spline.flag_stack:
print("Spline stack not present; use set_spline to create it")
return
return flag_spline.pol_stack[ifr](vv)
def freq_poly_p(ifr,tt=300., p0=0., plot=True, prt=True, **kwargs):
"""
    Prints the frequency of a given mode at a given temperature
    and pressure, when a polynomial fit of the frequencies is active
    Args:
        ifr: mode number (starting from 0)
        tt: temperature (K)
        p0: pressure (GPa)
Keyword Args:
fix (optional): Kp value fixed to *fix* if *fix* > 0.1
Note:
A polynomial fitting must be active
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if not flag_poly.flag:
print("Polinomial fit not active: use set_poly to active it")
return
pol_stack=flag_poly.pol_stack
degree=flag_poly.degree
if fixpar:
vpt=new_volume(tt,p0,fix=fix_value)
else:
vpt=new_volume(tt,p0)
f0=np.polyval(pol_stack[ifr],vpt)
ifl=np.array([])
vol_min=np.min(data_vol_freq)
vol_max=np.max(data_vol_freq)
nvol=pr.nvol
vol_range=np.linspace(vol_min,vol_max,nvol)
for ivol in int_set:
ifl=np.append(ifl,lo.data_freq[ifr,ivol+1])
pol=np.polyfit(data_vol_freq,ifl,degree)
ivalf=np.polyval(pol,vol_range)
if prt:
if plot:
plt.figure(2)
plt.plot(data_vol_freq,ifl,"*")
plt.plot(vol_range,ivalf,"b-")
plt.xlabel("V (A^3)")
plt.ylabel("Freq (cm^-1)")
plt.show()
if prt:
print("\nFrequency: %6.2f cm^-1" % f0)
print("Pressure %4.2f GPa, temperature %5.2f K, " \
"volume %8.4f A^3" % (p0, tt, vpt[0]))
return
else:
return f0
def freq_spline_p(ifr,tt=300.,pp=0.,prt=True,**kwargs):
"""
Prints the frequency of a given mode at some temperature
and pressure if a spline fitting method has been chosen
Args:
ifr: mode number (starting from 0)
tt: temperature (K)
pp: pressure (GPa)
fix (optional): Kp value fixed to *fix* if *fix* > 0.1
A spline fitting must be active
"""
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if not flag_spline.flag:
print("Spline fit not active: use set_spline to active it")
return
if fixpar:
volp=new_volume(tt,pp,fix=fix_value)
else:
volp=new_volume(tt,pp)
fr=freq_spline_v(ifr,volp)
vol=np.linspace(min(data_vol_freq),max(data_vol_freq),pr.nvol)
freq=flag_spline.pol_stack[ifr](vol)
ifl=[]
for ivol in int_set:
ifl=np.append(ifl,lo.data_freq[ifr,ivol+1])
if prt:
plt.figure()
plt.plot(vol,freq,"k-")
plt.plot(data_vol_freq,ifl,"k*")
plt.xlabel("Volume (A^3)")
plt.ylabel("Frequency (cm^-1)")
plt.show()
print("Frequency: %5.2f" % fr)
print("Pressure: %4.2f GPa, temperature %5.2f K, volume %8.4f A^3" %\
(pp, tt, volp))
else:
return fr
def check_spline(ifr, save=False, title=True, tex=False):
"""
Plot of the frequency as a function of volume
Args:
ifr: mode number
save: if True, the plot is saved in a file
dpi: resolution of the plot
ext: graphics file format
title: if True, a title is written above the plot
tex: if True, LaTeX fonts are used in the labels
"""
if not flag_spline.flag:
print("Spline fit not active: use set_spline")
return
vol=np.linspace(min(data_vol_freq),max(data_vol_freq),pr.nvol)
freq=flag_spline.pol_stack[ifr](vol)
ifl=[]
for ivol in int_set:
ifl=np.append(ifl,lo.data_freq[ifr,ivol+1])
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
plt.figure()
leg="Mode number "+str(ifr)
if ifr in exclude.ex_mode:
leg=leg+"\nExcluded from free energy computation"
plt.plot(vol,freq,"k-")
plt.plot(data_vol_freq,ifl,"k*")
if latex.flag:
plt.xlabel("Volume (\AA$^3$)", fontsize=fontsize)
plt.ylabel("Frequency (cm$^{-1}$)", fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
plt.xlabel("Volume (A^3)")
plt.ylabel("Frequency (cm^-1)")
if title:
plt.title(leg)
if save:
filename=path + '/mode_' + str(ifr) + '.' + ext
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
print("Figure saved as %s" % filename)
plt.show()
latex.off()
def check_poly(ifr, save=False, title=True, tex=False):
"""
Plot of the frequency as a function of volume
Args:
ifr: mode number
save: if True, the plot is saved in a file
dpi: resolution of the plot
ext: graphics file format
title: if True, a title is written above the plot
tex: if True, LaTeX fonts are used in the labels
"""
if not flag_poly.flag:
print("Polynomial fit not active: use set_poly")
return
pol_stack=flag_poly.pol_stack
vol=np.linspace(min(data_vol_freq),max(data_vol_freq),pr.nvol)
freq=np.polyval(pol_stack[ifr],vol)
ifl=[]
for ivol in int_set:
ifl=np.append(ifl,lo.data_freq[ifr,ivol+1])
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
plt.figure()
leg="Mode number "+str(ifr)
if ifr in exclude.ex_mode:
leg=leg+"\n Excluded from free energy computation"
plt.plot(vol,freq,"k-")
plt.plot(data_vol_freq,ifl,"k*")
if latex.flag:
plt.xlabel("Volume (\AA$^3$)", fontsize=fontsize)
plt.ylabel("Frequency (cm$^{-1}$)", fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
else:
plt.xlabel("Volume (A^3)")
plt.ylabel("Frequency (cm^{-1})")
if title:
plt.title(leg)
if save:
filename=path + '/mode_' + str(ifr) + '.' + ext
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
print("Figure saved as %s" % filename)
plt.show()
latex.off()
def frequency_p_range(ifr, pmin, pmax, npoint, dir=False, temp=298.15, degree=1, \
title=True, tex=False, save=False):
"""
Frequency of a mode computed as a function of pressure in a given range,
at a fixed temperature.
Args:
ifr: mode number
pmin, pmax, npoint: minimum and maximum pressure in the range (GPa), and
number of points
temp: temperature (default 298.15 K)
dir: if True, volume is computed through the volume_dir function;
otherwise, the EoS-based new_volume function is used (default False)
degree: degree of the fitting polynomial (default 1)
title: if False, title of the plot is suppressed (default True)
tex: if True, Latex output is used for the plot (default False)
save: if True, the plot is saved (default False)
Note:
A fit of the frequencies vs volume (either poly or spline) is required.
Note:
if save is True and tex is True, the fontsize, the resolution and
extension of the saved file are controlled by the parameters of the
latex class.
"""
if not (flag_poly.flag or flag_spline.flag):
print("\n*** Warning No fit of frequency was set\n")
return
npp=np.linspace(pmin, pmax, npoint)
if dir:
freq_p=np.array([])
for ip in npp:
vol=volume_dir(temp, ip)
if flag_poly.flag:
ifreq=freq_v_fun(ifr, vol)
elif flag_spline.flag:
ifreq=freq_spline_v(ifr, vol)
freq_p=np.append(freq_p, ifreq)
else:
if flag_poly.flag:
freq_p=np.array([freq_poly_p(ifr, temp, ip, plot=False, prt=False)[0] for ip in npp])
elif flag_spline.flag:
freq_p=np.array([freq_spline_p(ifr, temp, ip, plot=False, prt=False)[0] for ip in npp])
fit=np.polyfit(npp, freq_p, degree)
p_plot=np.linspace(pmin, pmax, npoint*10)
f_plot=np.polyval(fit, p_plot)
fit_rev=np.flip(fit)
fit_str='fit: freq = ' + str(fit_rev[0].round(3)) + ' + '
for ic in np.arange(1, degree+1):
if ic == degree:
fit_str=fit_str + str(fit_rev[ic].round(3)) + ' P^' + str(ic)
else:
fit_str=fit_str + str(fit_rev[ic].round(3)) + ' P^' + str(ic) + ' + '
dpi=80
ext='png'
if tex:
latex.on()
dpi=latex.get_dpi()
fontsize=latex.get_fontsize()
ext=latex.get_ext()
ticksize=latex.get_tsize()
title="Mode number " + str(ifr)
label="Fit (degree: "+str(degree)+")"
plt.figure()
plt.plot(npp, freq_p, "k*", label="Actual values")
plt.plot(p_plot, f_plot, "k-", label=label)
if latex.flag:
plt.ylabel("Freq (cm$^{-1}$)", fontsize=fontsize)
plt.xlabel("P (GPa)", fontsize=fontsize)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
if title:
            plt.suptitle(tit, fontsize=fontsize)
plt.legend(frameon=False, prop={'size': fontsize})
else:
plt.ylabel("Freq (cm^-1)")
plt.xlabel("P (GPa)")
if title:
            plt.title(tit)
plt.legend(frameon=False)
if save:
name=path+'/'+'mode_'+str(ifr)+'_vs_P.'+ext
plt.savefig(name, dpi=dpi, bbox_inches='tight')
plt.show()
latex.off()
print(fit_str)
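# Usage sketch (illustrative): pressure dependence of mode 0 between 0 and
# 5 GPa at room temperature, with the EoS-free route (dir=True) and a linear
# fit of the frequency/pressure values. A POLY or SPLINE fit must be active;
# the mode number and the P range are arbitrary.
def _example_frequency_p_range():
    frequency_p_range(0, 0., 5., 8, dir=True, temp=298.15, degree=1)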
def check_spline_total():
"""
Plots the frequencies of all the modes as a function of
volumes along with their fitting according to the
spline parameters chosen.
The fitting is restricted to the volume range set by the
set_volume_range function.
"""
for ifr in int_mode:
check_spline(ifr)
def check_spline_list(list_of_modes):
"""
Plots the frequencies of a given list of normal modes as functions
of volumes, along with their fitting according to the spline
parameters chosen.
Args: list_of_modes (a list of integers)
Example:
>>> check_spline_list([0, 1, 2])
"""
for ifr in list_of_modes:
check_spline(ifr)
def check_poly_total():
"""
Plots the frequencies of all the modes as a function of
volumes along with their fitting according to the
polynomial degree chosen.
The fitting is restricted to the volume range set by the
set_volume_range function.
"""
for ifr in int_mode:
check_poly(ifr)
def check_poly_list(list_of_modes):
"""
Plots the frequencies of a given list of normal modes
Args:
list_of_modes (a list of integers)
Example:
>>> check_poly_list([0, 1, 2])
"""
for ifr in list_of_modes:
check_poly(ifr)
def pressure_freq_list(tt,ifr,**kwargs):
if (not flag_poly.flag) and (not flag_spline.flag):
msg1='** Error ** This function can be used only in connection with '
msg2='a fitting of frequencies;\n'
msg3=' '*12 +'POLY or SPLINE must be used'
print(msg1+msg2+msg3)
return
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
pmin=0.
volmin=min(data_vol_freq)
if fixpar:
pmax=pressure(tt,volmin,fix=fix_value)
else:
pmax=pressure(tt,volmin)
p_list=np.linspace(pmin, pmax, pr.npres)
freq_list=np.array([])
for p_i in p_list:
if flag_spline.flag:
if fixpar:
f_i=freq_spline_p(ifr,tt,p_i,prt=False,fix=fix_value)
else:
f_i=freq_spline_p(ifr,tt,p_i,prt=False)
else:
if fixpar:
f_i=freq_poly_p(ifr,tt,p_i,prt=False,fix=fix_value)
else:
f_i=freq_poly_p(ifr,tt,p_i,prt=False)
freq_list=np.append(freq_list,f_i)
return freq_list, p_list
def pressure_freq(ifr,freq,tt,degree=4,**kwargs):
"""
Computes the pressure given the frequency of a normal mode, at a fixed
temperature.
Args:
ifr: normal mode number
freq: value of the frequency
tt: temperature
degree (optional): degree of the polynomial fitting the P/freq
values from the pressure_freq_list function
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
Notes:
it is advised to keep Kp fixed by either specifying fix, or
by using set_fix.
For "noisy" modes, use polynomial fits (set_poly), or
a spline fit (set_spline) with a large smooth parameter.
"""
if (not flag_poly.flag) and (not flag_spline.flag):
msg1='** Error ** This function can be used only in connection with '
msg2='a fitting of frequencies;\n'
msg3=' '*12 +'POLY or SPLINE must be used'
print(msg1+msg2+msg3)
return
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
if fixpar:
freq_list, p_list=pressure_freq_list(tt,ifr,fix=fix_value)
else:
freq_list, p_list=pressure_freq_list(tt,ifr)
plt.figure()
plt.plot(freq_list, p_list,"k*")
plt.ylabel("P (GPa)")
plt.xlabel("Freq (cm^-1)")
f_opt=np.polyfit(freq_list,p_list,deg=degree)
pres_f=np.polyval(f_opt,freq)
plt.plot(freq_list, p_list,"k-")
plt.show()
print("Pressure: %6.2f GPa" % pres_f)
print("Mode: %d, frequency: %6.2f cm^-1, temperature: %6.2f K" %\
(ifr, freq, tt))
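# Usage sketch (illustrative): pressure estimated from the frequency of a
# normal mode at a fixed temperature; the mode number (10), the frequency
# (520 cm^-1) and the fixed Kp (4.5) are arbitrary placeholders. A POLY or
# SPLINE fit of the frequencies must be active.
def _example_pressure_freq():
    pressure_freq(10, 520., 298.15, degree=4, fix=4.5)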
def temperature_freq(ifr,freq, tmin, tmax, npt, pp,degree=2,**kwargs):
"""
Computes the temperature given the frequency of a normal mode, at a fixed
pressure. A T range must be specified
Args:
ifr: normal mode number
freq: value of the frequency
tmin: minimum value of T
tmax: maximum value of T
npt: number of T points in the range
pp: pressure
degree (optional): degree of the polynomial fitting the P/freq
values from the pressure_freq_list function
Keyword Args:
fix: Kp fixed, if fix=Kp > 0.1
Note:
it is advised to keep Kp fixed by either specifying fix, or
by using set_fix.
For "noisy" modes, use polynomial fits (set_poly), or
a spline fit (set_spline) with a large smooth parameter.
"""
if (not flag_poly.flag) and (not flag_spline.flag):
msg1='** Error ** This function can be used only in connection with '
msg2='a fitting of frequencies;\n'
msg3=' '*12 +'POLY or SPLINE must be used'
print(msg1+msg2+msg3)
return
l_arg=list(kwargs.items())
fixpar=False
for karg_i in l_arg:
if 'fix' == karg_i[0]:
fix_value=karg_i[1]
fixpar=True
freq_list=np.array([])
t_list=np.linspace(tmin,tmax,npt)
if fixpar:
for ti in t_list:
nvi=new_volume(ti,pp,fix=fix_value)
if flag_spline.flag:
fi=flag_spline.pol_stack[ifr](nvi)
else:
fi=np.polyval(flag_poly.pol_stack[ifr],nvi)
freq_list=np.append(freq_list,fi)
else:
for ti in t_list:
nvi=new_volume(ti,pp)
if flag_spline.flag:
fi=flag_spline.pol_stack[ifr](nvi)
else:
fi=np.polyval(flag_poly.pol_stack[ifr],nvi)
freq_list=np.append(freq_list,fi)
plt.figure()
plt.plot(freq_list, t_list,"k*")
plt.ylabel("T (K)")
plt.xlabel("Freq (cm^-1)")
f_opt=np.polyfit(freq_list,t_list,deg=degree)
temp_f=np.polyval(f_opt,freq)
plt.plot(freq_list, t_list,"k-")
plt.show()
print("Temperature: %6.2f K" % temp_f)
print("Mode: %d, frequency: %6.2f cm^-1, Pressure: %6.2f GPa" %\
(ifr, freq, pp))
def grun_mode_vol(ifr,vv, method='poly',plot=False):
"""
Mode-gamma Gruneisen parameter of a normal mode at a given volume
Args:
ifr: mode number
vv: volume
method (optional): method chosen for the frequency/volume values
(default: 'poly'; other possible method='spline')
        plot (optional): if True (default False), plots the frequency values
of the selected mode in a neighborhood of the
volume vv
Returns:
Mode-gamma Gruneisen parameter and the frequency of the mode at the
volume vv
"""
if not vd.flag:
v_range=np.linspace(vv-pr.delta_v,vv+pr.delta_v,pr.nump_v)
else:
v_range=np.linspace(vv-vd.delta,vv+vd.delta,pr.nump_v)
f_list=np.array([])
for iv in v_range:
if flag_poly.flag_stack and method=='poly':
f_i=freq_v_fun(ifr,iv)
elif flag_spline.flag_stack and method=='spline':
f_i=freq_spline_v(ifr,iv)
else:
print("No fitting stack present for the method chosen")
print("Create the appropriate stack by using set_poly or set_spline")
return
f_list=np.append(f_list,f_i)
fit_f=np.polyfit(v_range,f_list,pr.degree_v)
derv=np.polyder(fit_f,1)
derf=np.polyval(derv,vv)
ffit=np.polyval(fit_f,vv)
if plot:
if not vd.flag:
v_fit_list=np.linspace(vv-pr.delta_v,vv+pr.delta_v,40)
else:
v_fit_list=np.linspace(vv-vd.delta,vv+vd.delta,40)
f_poly_list=np.polyval(fit_f,v_fit_list)
fig=plt.figure()
ax=fig.add_subplot(111)
ax.plot(v_range,f_list,"k*")
ax.plot(v_fit_list,f_poly_list,"k-")
ax.set_xlabel("V (A^3)")
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
ax.set_ylabel("Freq (cm^-1)")
plt.show()
return -1*vv*derf/ffit,ffit
def gruneisen(vol, method='poly',plot=True):
"""
Mode-gamma Gruneisen parameter of all the normal modes at a given volume
Args:
vv: volume
method (optional): method chosen for the frequency/volume values
(default: 'poly'; other possible method='spline')
plot (optional): if True (default), plots the mode-gamma Gruneisen
parameters of all the modes
Returns:
if plot=False, outputs the list of the mode-gamma Gruneisen parameters
of all the modes
"""
if method=='poly' and not flag_poly.flag_stack:
print("Polynomial stack not present; use set_poly to create")
return
if method=='spline' and not flag_spline.flag_stack:
print("Spline stack not present; use set_spline to create")
return
grun_list=np.array([])
freq_list=np.array([])
for im in int_mode:
g_i,f_i=grun_mode_vol(im,vol,method=method,plot=False)
grun_list=np.append(grun_list, g_i)
freq_list=np.append(freq_list, f_i)
if plot:
fig=plt.figure()
ax=fig.add_subplot(111)
ax.plot(freq_list,grun_list,"k*")
ax.set_xlabel("Mode frequency")
ax.set_ylabel("Gruneisen")
plt.show()
if not plot:
return grun_list
def gruneisen_therm(tt,pp,ex_data=False,prt=True):
"""
Gruneisen parameter: alpha*K_T*V/Cv
Args:
tt: temperature
pp: pressure
ex_data: if True, values of volume, constant volume specific heat,
thermal expansion, bulk modulus and gamma are returned
(default False)
prt: if True, computed values are printed
Note:
The required bulk modulus (Reuss definition) is computed by
the bulk_modulus_p function, with the noeos parameter set to
True.
"""
k, vol=bulk_modulus_p(tt,pp,noeos=True)
ent,cv=entropy_v(tt,vol)
alpha=alpha_dir(tt, pp)
volume=(vol/zu)*avo*1e-30 # volume of a mole in m^3
grun_th=alpha*volume*k*1e9/cv
if prt:
print("\nGruneisen parameter (adimensional): %6.3f\n" % grun_th)
print("Thermal expansion: %6.2e (K^-1)" % alpha)
print("Bulk modulus: %6.2f (GPa)" % k)
print("Specific heat at constant volume: %6.2f (J/mol K)" % cv)
if ex_data:
return vol,cv,alpha,k,grun_th
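# Usage sketch (illustrative): thermodynamic Gruneisen parameter at 300 K and
# 0 GPa; with ex_data=True the volume, Cv, alpha, K and gamma values are also
# returned for further use (for instance, by the q_parameter function below).
def _example_gruneisen_therm():
    vol, cv, alpha, k, gamma = gruneisen_therm(300., 0., ex_data=True, prt=True)
    return gamma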
def q_parameter(pfin=5, temp=298.15, npoint=12):
"""
Calculation of the parameter q of the equation
gamma/gamma_0 = (V/V_0)^q
The Gruneisen parameter is evaluated at constant temperature for
a range of pressures, for which the corresponding volumes are computed,
by using the gruneisen_therm function.
Args:
pfin: final (maximum) pressure (GPa; default 5)
temp: temperature (K; default 298.15)
npoint: number of points in the P range (default 12)
"""
p_list=np.linspace(0., pfin, npoint)
res=list(gruneisen_therm(temp, ip, ex_data=True, prt=False) for ip in p_list)
res=np.array(res)
v_list=res[:,0]
gr_list=res[:,4]
k_list=res[:,3]
r_gr=gr_list/gr_list[0]
r_v=v_list/v_list[0]
r_k=k_list/k_list[0]
qini=[1]
q,_=curve_fit(q_parameter_func, r_v, r_gr, p0=qini)
q_ref=q[0]
rv_plot=np.linspace(np.min(r_v), np.max(r_v), 60)
gr_plot=q_parameter_func(rv_plot, q_ref)
plt.figure()
plt.plot(r_v, r_gr, "k*")
plt.plot(rv_plot, gr_plot, "k-")
plt.xlabel("V/V0")
plt.ylabel("gr/gr_0")
plt.title("q-plot")
plt.show()
print("Temperature: %5.1f K" % temp)
print("Maximum pressure: %5.1f GPa" % pfin)
print("Volume at pressure 0: %7.3f A^3" % v_list[0])
print("Gamma at pressure 0: %7.3f" % gr_list[0])
print("q value: %7.3f" % q_ref)
def q_parameter_func(rv,q):
return rv**q
def delta_T_parameter(tmax, npoint=8, tref=298.15, out=False):
"""
Anderson-Gruneisen parameter delta_T
K_T(T) = K_T(T0)*(V0/V(T))^delta_T
"""
t_list=np.linspace(tref, tmax, npoint)
kv=list(bulk_modulus_p(it, 0., noeos=True) for it in t_list)
kv=np.array(kv)
kl=kv[:,0]
vl=kv[:,1]
k0=kl[0]
v0=vl[0]
rvl=v0/vl
rkl=kl/k0
d_ini=[0.]
d_t,_=curve_fit(delta_T_func, rvl, rkl, p0=d_ini)
delta_t=d_t[0]
print("Determination of the Anderson-Gruneisen parameter\n")
print("T_ref = %5.2f K; T_max = %5.2f" % (tref, tmax))
rv_plot=np.linspace(np.min(rvl), np.max(rvl), npoint*10)
rk_plot=list(delta_T_func(irv,delta_t) for irv in rv_plot)
rk_plot=np.array(rk_plot)
    plt.figure()
plt.plot(rv_plot, rk_plot,"k-",label="Fit")
plt.plot(rvl,rkl,"k*", label="Actual values")
plt.xlabel("V0/V")
plt.ylabel("K/K0")
plt.title("K/K0 = (V0/V)^delta_T plot")
plt.legend(frameon=False)
plt.show()
print("delta_T = %5.2f" % delta_t)
if out:
return delta_t
def delta_T_func(rv, d_t):
return rv**d_t
def grun_therm_serie(tini,tfin,npoint=12,HTlim=2000,degree=1,g_deg=1, ex=False):
print("\n---- High temperature estimation of the thermal expansion coefficient ----\n")
v0, k_gpa, kp=eos_temp(298.15,prt=False, update=True)
set_fix(kp)
vol=new_volume(298.15,0.0001)
ent, cve=entropy_v(298.15,vol[0])
dp_limit=apfu*3*avo*kb # Dulong Petit limit
emp=10636/(ent/apfu+6.44) # Empirical Einstein T
print("\nDulong-Petit limit of Cv %6.2f (J/mol K)" % dp_limit)
print("Einstein's temperature: %6.2f (K)" % emp)
t_list=np.linspace(tini,tfin,npoint)
v_list=np.array([])
cv_list=np.array([])
k_list=np.array([])
alp_list=np.array([])
for it in t_list:
iv,icv,ialp,ik,igr=gruneisen_therm(it,0,ex_data=True,prt=False)
v_list=np.append(v_list,iv)
cv_list=np.append(cv_list,icv)
k_list=np.append(k_list,ik)
alp_list=np.append(alp_list,ialp)
if not gamma_fit.flag:
pol=gamma_estim(tini,tfin,npoint,g_deg)
else:
pol=gamma_fit.pol
print("Gamma(V) fit from already stored data")
grun_list=np.polyval(pol,t_list)
fact_list=1e-9*grun_list/(k_list*v_list)
f_coef=np.polyfit(t_list,fact_list,degree)
fact_calc=np.polyval(f_coef,t_list)
plt.figure()
plt.plot(t_list,fact_list*1e9,"*")
plt.plot(t_list,fact_calc*1e9)
plt.xlabel("Temperature (K)")
plt.ylabel("J^-1")
plt.title("Gamma/VK_T")
plt.show()
fact_lim=np.polyval(f_coef,HTlim)
alpha_limit=dp_limit*fact_lim
print("\nGamma/VK fit of degree %1i" % degree)
print("Alpha constrained at the high temperature limit: %6.2e K^-1" % alpha_limit)
print("\n -------------------------------")
if ex:
return alpha_limit
def number_phonon_mode(ifr,tt,vol,method='poly'):
"""
Number of phonons of a given mode at given temperature and volume
Args:
ifr: mode number
tt: temperature
vv: volume
method (optional): method chosen for the frequency/volume values
(default: 'poly'; other possible method='spline')
Returns:
Number of phonons computed according to the Bose-Einstein statistics
"""
if method=='poly' and not flag_poly.flag_stack:
print("Polynomial stack not present; use set_poly to create")
return
if method=='spline' and not flag_spline.flag_stack:
print("Spline stack not present; use set_spline to create")
return
if method=='poly':
f_i=freq_v_fun(ifr,vol)
else:
f_i=freq_spline_v(ifr,vol)
f_i=csl*f_i
exp_fact=np.exp(h*f_i/(kb*tt))
return 1./(exp_fact-1),vol
def pressure_phonon_mode(ifr,tt,vol,method='poly'):
"""
Contribution to the vibrational pressure from a given mode, at fixed
temperature and volume
Args:
ifr: mode number
tt: temperature
vv: volume
method (optional): method chosen for the frequency/volume values
(default: 'poly'; other possible method='spline')
Returns:
Vibrational pressure of the "ifr" mode (in GPa) at the selected
temperature (tt) and volume (vv)
"""
if method=='poly' and not flag_poly.flag_stack:
print("Polynomial stack not present; use set_poly to create")
return
if method=='spline' and not flag_spline.flag_stack:
print("Spline stack not present; use set_spline to create")
return
nph,vol=number_phonon_mode(ifr,tt,vol,method=method)
g_i,f_i=grun_mode_vol(ifr,vol,method=method,plot=False)
pz_i=(1./(2*vol*1e-21))*h*(f_i*csl)*g_i
pth_i=(1./(vol*1e-21))*nph*h*(f_i*csl)*g_i
p_total_i=(pz_i+pth_i)*deg[ifr]
return p_total_i
def pressure_phonon(tt,vol,method='poly',plot=True):
"""
Vibrational pressure from all the normal modes at given temperature
and volume
Args:
tt: temperature
vv: volume
method (optional): method chosen for the frequency/volume values
(default: 'poly'; other possible method='spline')
plot (optional): if True (default), plots the contribution to the
vibrational pressure of all the normal modes.
Returns:
If plot=False, outputs the vibrational pressure of all the modes
(in GPa) at the selected temperature (tt) and volume (vv).
"""
if method=='poly' and not flag_poly.flag_stack:
print("Polynomial stack not present; use set_poly to create")
return
if method=='spline' and not flag_spline.flag_stack:
print("Spline stack not present; use set_spline to create")
return
p_list=np.array([])
for ifr in int_mode:
p_total_i=pressure_phonon_mode(ifr,tt,vol,method=method)
p_list=np.append(p_list,p_total_i)
p_total=p_list.sum()
if plot:
plt.figure()
plt.plot(int_mode,p_list,"r*")
plt.xlabel("Mode number")
plt.ylabel("Pressure (GPa)")
plt.show()
print("\nTotal phonon pressure: %4.2f GPa " % p_total)
if not plot:
return p_list
else:
return
def upload_mineral(tmin,tmax,points=12,HT_lim=0., t_max=0., deg=1, g_deg=1, model=1, mqm='py',\
b_dir=False, blk_dir=False, extra_alpha=True, volc=False):
"""
Prepares data to be uploaded in the mineral database.
Args:
tmin: minimum temperature for fit K0, Cp and alpha
tmax: maximum temperature for fit
points: number of points in the T range for fit
mqm: name of the mineral, as specified in the internal
database,
b_dir: if True, the alpha_dir_serie function is used for the
computation of thermal expansion
blk_dir: if True, the bulk_modulus_p_serie function is used
to compute the bulk modulus as a function of T
(with noeos=False); K0, V0 and Kp are from an eos_temp
computation.
If False, the function bulk_serie is used.
HT_lim: Temperature at which the Dulong-Petit limit for Cv
is supposed to be reached (default 0.: no Dulong-Petit
model)
t_max: maximum temperature for the power series fit of Cp(T);
if t_max=0. (default), t_max=HT_lim. The parameter is
                relevant only if HT_lim is not zero.
model: Used in the HT_limit estimation of Cv; Einstein model
for Cv(T) with one frequency (default model=1), or with
2 frequencies (model=2)
deg: Used in the HT limit estimation of alpha (relevant if
HT_lim > 0; default: 1)
g_deg: Used in the HT limit estimation of Cp (relevant if
HT_lim > 0.; default 1)
         extra_alpha: if True, the high temperature extrapolation
is done (relevant if HT_lim > 0; default: True)
volc: if True, V0 is set at the value found in the database
(default: False)
Example:
>>> upload_mineral(300,800,16,mqm='coe', b_dir=True)
"""
flag_int=False
if f_fix.flag:
kp_original=f_fix.value
flag_int=True
reset_fix()
if not volume_correction.flag:
volume_correction.set_volume(eval(mqm).v0)
volume_correction.on()
g0=eval(mqm).g0
if b_dir and blk_dir:
v0, k_gpa, kp=eos_temp(298.15,prt=False, update=True)
fit_b,_=bulk_modulus_p_serie(tmin, tmax,5,0, noeos=False, fit=True, deg=1, out=True)
dkt=fit_b[0]
else:
v0, k_gpa, kp=eos_temp(298.15,prt=False, update=True)
set_fix(kp)
fit_b=bulk_serie(tmin,tmax,5,degree=1,update=True)
dkt=fit_b[0]
if not volc:
v0=v0*1e-30*1e5*avo/zu
else:
v0=volume_correction.v0_init
ex_internal_flag=False
if exclude.flag & (not anharm.flag):
exclude.restore()
ex_internal_flag=True
print("\nWarning ** Excluded modes restored in order\nto "
"correctly compute entropy and specific heat")
        print("At the end of computation, exclusion of modes "
              "will be reactivated")
s0,dum=entropy_p(298.15,0.0001,prt=False)
if ex_internal_flag:
exclude.on()
ex_internal_flag=False
mdl=1
if model==2:
mdl=2
if HT_lim > 0.:
fit_cp=cp_serie(tmin,tmax,points,0.0001,HTlim=HT_lim, t_max=t_max, g_deg=g_deg, model=mdl, prt=False)
if extra_alpha:
fit_al=alpha_serie(tmin,tmax,points,0.0001,HTlim=HT_lim, t_max=t_max, degree=deg,prt=False)
else:
fit_al=alpha_serie(tmin,tmax,points,0.0001,prt=False)
else:
fit_cp=cp_serie(tmin,tmax,points,0.0001, g_deg=g_deg, prt=False)
if not b_dir:
fit_al=alpha_serie(tmin,tmax,points,0.0001,prt=False,g_deg=g_deg)
else:
fit_al=alpha_dir_serie(tmin,tmax,points,0.0001,prt=False)
eval(mqm).load_ref(v0,g0,s0)
eval(mqm).load_bulk(k_gpa,kp,dkt)
eval(mqm).load_cp(fit_cp,power)
eval(mqm).load_alpha(fit_al,power_a)
eval(mqm).eos='bm'
reset_fix()
if flag_int:
set_fix(kp_original)
def upload_mineral_2(tmin,tmax,points=12,HT_lim=0., t_max=0., g_deg=1, model=1, mqm='py',\
alpha_dir=False, dir=False, volc=False):
"""
Prepares data to be uploaded in the mineral database.
Args:
tmin: minimum temperature for fit K0, Cp and alpha
tmax: maximum temperature for fit
points: number of points in the T range for fit
mqm: name of the mineral, as specified in the internal
database,
alpha_dir: if True, the alpha_dir_serie function is used for the
computation of thermal expansion
dir: if True, the bulk_modulus_p_serie function is used
to compute the bulk modulus as a function of T
(with noeos=False); K0, V0 and Kp are from an eos_temp
computation.
If False, the function bulk_serie is used.
HT_lim: Temperature at which the Dulong-Petit limit for Cv
is supposed to be reached (default 0.: no Dulong-Petit
model)
t_max: maximum temperature for the power series fit of Cp(T);
if t_max=0. (default), t_max=HT_lim. The parameter is
                relevant only if HT_lim is not zero.
model: Used in the HT_limit estimation of Cv; Einstein model
for Cv(T) with one frequency (default model=1), or with
2 frequencies (model=2)
g_deg: Used in the HT limit estimation of Cp (relevant if
HT_lim > 0.; default 1)
volc: if True, V0 is set at the value found in the database
(default: False)
Example:
      >>> upload_mineral_2(300,800,16,mqm='coe', dir=True)
"""
flag_int=False
if f_fix.flag:
kp_original=f_fix.value
flag_int=True
reset_fix()
if not volume_correction.flag:
volume_correction.set_volume(eval(mqm).v0)
volume_correction.on()
g0=eval(mqm).g0
if dir:
t_list=np.linspace(298, tmax, points)
v0, k_gpa, kp=bulk_dir(298,prt=False,out=True)
xx=list(bulk_dir(tt,prt=False,out=True) for tt in t_list)
xx=np.array(xx)
v_list=xx[:,0]
k_list=xx[:,1]
fit_k=np.polyfit(t_list,k_list,1)
dkt=fit_k[0]
else:
v0, k_gpa, kp=eos_temp(298.15,prt=False, update=True)
set_fix(kp)
fit_b=bulk_serie(298.15,tmax,5,degree=1,update=True)
dkt=fit_b[0]
if alpha_dir:
volume_ctrl.set_shift(0.)
t_list=np.linspace(tmin, tmax, points)
print("\n*** alpha_dir computation: V(T) curve computed with the")
print(" bulk_modulus_p_serie function")
b_par,v_par=bulk_modulus_p_serie(tmin,tmax,points,0,noeos=True,fit=True,type='spline',\
deg=3,smooth=5,out=True)
v_list=v_par(t_list)
fit_v=np.polyfit(t_list,v_list,4)
fit_der=np.polyder(fit_v,1)
alpha_list=(np.polyval(fit_der,t_list))
alpha_list=list(alpha_list[it]/v_list[it] for it in np.arange(points))
coef_ini=np.ones(lpow_a)
fit_al, alpha_cov=curve_fit(alpha_dir_fun,t_list,alpha_list,p0=coef_ini)
t_plot=np.linspace(tmin, tmax, points*3)
alpha_plot=alpha_dir_fun(t_plot,*fit_al)
plt.figure()
plt.plot(t_plot, alpha_plot, "k-", label="Fit")
plt.plot(t_list, alpha_list, "k*", label="Actual values")
plt.legend(frameon=False)
plt.title("Thermal expansion")
plt.xlabel("T (K)")
plt.ylabel("Alpha (K^-1)")
plt.show()
else:
fit_al=alpha_serie(tmin,tmax,points,0.0001,prt=False,g_deg=g_deg)
if not volc:
v0=v0*1e-30*1e5*avo/zu
else:
v0=volume_correction.v0_init
ex_internal_flag=False
if exclude.flag & (not anharm.flag):
exclude.restore()
ex_internal_flag=True
print("\nWarning ** Excluded modes restored in order\nto "
"correctly compute entropy and specific heat")
        print("At the end of computation, exclusion of modes "
              "will be reactivated")
if dir:
volume_ctrl.set_shift(0.)
vol=volume_dir(298.15,0)
s0, dum=entropy_v(298.15,vol)
else:
s0,dum=entropy_p(298.15,0.0001,prt=False)
if ex_internal_flag:
exclude.on()
ex_internal_flag=False
mdl=1
if model==2:
mdl=2
if HT_lim > 0.:
fit_cp=cp_serie(tmin,tmax,points,0.0001,HTlim=HT_lim, t_max=t_max, g_deg=g_deg, model=mdl, prt=False)
else:
fit_cp=cp_serie(tmin,tmax,points,0.0001, g_deg=g_deg, prt=False)
eval(mqm).load_ref(v0,g0,s0)
eval(mqm).load_bulk(k_gpa,kp,dkt)
eval(mqm).load_cp(fit_cp,power)
eval(mqm).load_alpha(fit_al,power_a)
eval(mqm).eos='bm'
reset_fix()
if flag_int:
set_fix(kp_original)
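# reaction_dir (added note): Gibbs free energy change of a reaction at (tt, pp).
# The phase named by mqm is treated at the quantum-mechanical level (G = F + PV via
# g_vt_dir); every other phase is taken from the internal mineral database through its
# g_tp method. prod_spec/rea_spec are lists of phase names and prod_coef/rea_coef the
# matching stoichiometric coefficients; the returned value is G(products) - G(reactants).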
def reaction_dir(tt,pp,mqm,prod_spec, prod_coef, rea_spec, rea_coef):
mv0=eval(mqm+'.v0')
mg0=eval(mqm+'.g0')
qm_energy=g_vt_dir(tt,pp,v0=mv0, g0=mg0)
gprod=0.
for pri, pci in zip(prod_spec, prod_coef):
if pri != mqm:
gprod=gprod+(eval(pri+'.g_tp(tt,pp)'))*pci
else:
gprod=gprod+qm_energy*pci
grea=0.
for ri,rci in zip(rea_spec, rea_coef):
if ri != mqm:
grea=grea+(eval(ri+'.g_tp(tt,pp)'))*rci
else:
grea=grea+qm_energy*rci
return gprod-grea
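# pressure_react_dir (added note): equilibrium pressure at temperature tt, obtained by
# minimizing the squared reaction Gibbs energy over pressure with
# scipy.optimize.minimize_scalar; the minimizer's x attribute is the pressure at which
# delta-G is (approximately) zero.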
def pressure_react_dir(tt,mqm,prod_spec, prod_coef, rea_spec, rea_coef):
fpr=lambda pp: (reaction_dir(tt,pp,mqm, \
prod_spec, prod_coef, rea_spec, rea_coef))**2
pres=scipy.optimize.minimize_scalar(fpr,tol=0.001)
return pres.x
def equilib_dir(tini,tfin,npoint, mqm='py', \
prod=['py',1], rea=['ens',1.5,'cor', 1], out=False):
"""
Computes the equilibrium pressure for a reaction involving a
given set of minerals, in a range of temperatures.
Args:
tini: minimum temperature in the range
tfin: maximum temperature in the range
npoint: number of points in the T range
mqm: mineral phase dealt at the quantum mechanical level,
whose Gibbs free energy is computed as G=F+PV
prod: list of products of the reaction in the form
[name_1, c_name_1, name_2, c_name_2, ...]
where name_i is the name of the i^th mineral, as stored
in the database, and c_name_i is the corresponding
stoichiometric coefficient
rea: list of reactants; same syntax as the "prod" list.
Example:
>>> equilib_dir(300, 500, 12, mqm='py', prod=['py',1], rea=['ens', 1.5, 'cor', 1])
"""
lprod=len(prod)
lrea=len(rea)
prod_spec=prod[0:lprod:2]
prod_coef=prod[1:lprod:2]
rea_spec=rea[0:lrea:2]
rea_coef=rea[1:lrea:2]
flag_volume_max.value=False
lastr=rea_spec[-1]
lastp=prod_spec[-1]
prod_string=''
for pri in prod_spec:
prod_string=prod_string + pri
if pri != lastp:
prod_string=prod_string+' + '
rea_string=''
for ri in rea_spec:
rea_string = rea_string + ri
if ri != lastr:
rea_string=rea_string+' + '
t_list=np.linspace(tini,tfin,npoint)
p_list=np.array([])
for ti in t_list:
pi=pressure_react_dir(ti,mqm, prod_spec, prod_coef, rea_spec, rea_coef)
p_list=np.append(p_list,pi)
p_new=np.array([])
t_new=np.array([])
for pi, ti in zip(p_list, t_list):
p_new=np.append(p_new,pi)
t_new=np.append(t_new,ti)
serie=(t_new,p_new)
| pd.set_option('colheader_justify', 'center') | pandas.set_option |
import datetime
from collections import OrderedDict
import pandas as pd
from google.cloud import bigquery
CLIENT = None
PROJECT_ID = None
def insert_date_range(sql, date_range):
start, end = date_range
if start is None and end is None: return sql
if start is None:
return sql + ' WHERE `date` <= DATE("%s")' % end
if end is None:
return sql + ' WHERE `date` >= DATE("%s")' % start
return sql + ' WHERE DATE("%s") <= `date` AND `date` <= DATE("%s")' % (start, end)
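# Example (added, illustrative): the WHERE clause produced for a two-sided range.
# The project id, dataset name and dates below are made up.
# >>> insert_date_range('SELECT date, time, device, value FROM `proj.p1.steps`',
# ...                   ('2020-01-01', '2020-01-31'))
# 'SELECT date, time, device, value FROM `proj.p1.steps` WHERE DATE("2020-01-01") <= `date` AND `date` <= DATE("2020-01-31")'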
# define helper fns:
def query_covariate_df_from_gbq(pid, date_range, covariate):
"""
Query a table from Google BigQuery, via SQL.
:param pid: patient id (str)
    :param covariate: `heartrate`, `steps`, `sleep`
"""
assert covariate in ['heartrate', 'steps', 'sleep']
columns = ['Date', 'Time', 'Source', 'Value']
if covariate != 'sleep':
sql = """
SELECT date, time, device, value
FROM `%s.%s.%s`
""" % (PROJECT_ID, pid, covariate)
else:
sql = """
SELECT date, time, device, type, value
FROM `%s.%s.%s`
""" % (PROJECT_ID, pid, covariate)
columns = ['Date', 'Time', 'Source', 'Value', 'n_sleep_seconds']
sql = insert_date_range(sql, date_range)
df = CLIENT.query(sql).to_dataframe()
df.columns = columns
try:
df['date_time'] = pd.to_datetime(df['date_time'])
    except KeyError:  # fall back when the table has no combined date_time column
        df['date_time'] = ['%s %s' % (d, t) for d, t in zip(df['Date'].values, df['Time'].values)]
df['date_time'] = | pd.to_datetime(df['date_time']) | pandas.to_datetime |
from json import load
from tools.funclib import table2fasta
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
import benchmark_common as bcommon
import config as cfg
import os
#region Build the training set for the enzyme/non-enzyme classifier
def get_enzyme_train_set(traindata):
    """[Build the training set for the enzyme/non-enzyme classifier]
    Args:
        traindata ([DataFrame]): [full training data]
    Returns:
        [DataFrame]: [train_X, train_Y]
"""
train_X = traindata.iloc[:,7:]
train_Y = traindata['isemzyme'].astype('int')
return train_X, train_Y
#endregion
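# Minimal usage sketch (added, illustrative): assumes the feather file configured by
# cfg.TRAIN_FEATURE has the layout expected below (7 leading metadata columns, then
# features, plus an 'isemzyme' flag). The model path string is a placeholder, not a
# path defined by this project.
# train_df = pd.read_feather(cfg.TRAIN_FEATURE)
# X, y = get_enzyme_train_set(train_df)
# train_isenzyme(X, y, './model/isenzyme.model', force_model_update=True)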
#region Build the training set for predicting how many functions an enzyme has
def get_howmany_train_set(train_data):
    """[Build the training set for predicting how many functions an enzyme has]
    Args:
        train_data ([DataFrame]): [full training data]
    Returns:
        [DataFrame]: [[train_x, train_y]]
"""
    train_data = train_data[train_data.isemzyme] # keep only enzyme records
train_X = train_data.iloc[:,7:]
train_Y =train_data['functionCounts'].astype('int')
return train_X, pd.DataFrame(train_Y)
#endregion
#region Build the training set for EC number prediction
def get_ec_train_set(train_data, ec_label_dict):
    """[Build the training set for EC number prediction]
Args:
ec_label_dict: ([dict]): [ec to label dict]
train_data ([DataFrame]): [description]
Returns:
        [DataFrame]: [[train_x, train_y]]
"""
if cfg.TRAIN_USE_ONLY_ENZYME:
        train_data = train_data[train_data.isemzyme] # keep only enzyme records
# if cfg.TRAIN_USE_ONLY_SINGLE_FUNCTION:
    train_data = train_data[train_data.functionCounts ==1] # keep only single-function enzymes
train_data = train_data[(train_data.ec_specific_level >= cfg.TRAIN_USE_SPCIFIC_EC_LEVEL) |(train_data.ec_specific_level ==0)]
train_data.reset_index(drop=True, inplace=True)
train_data.insert(loc=1, column='ec_label', value=train_data.ec_number.apply(lambda x: ec_label_dict.get(x)))
# train_data['ec_label'] = train_data.ec_number.apply(lambda x: ec_label_dict.get(x))
train_X = train_data.iloc[:, 8:]
train_Y =train_data['ec_label']
return train_X, pd.DataFrame(train_Y)
#endregion
#region Train the enzyme/non-enzyme model
def train_isenzyme(X,Y, model_file, vali_ratio=0.3, force_model_update=False):
    """[Train the enzyme/non-enzyme (binary) classifier]
Args:
vali_ratio:
        X ([DataFrame]): [feature data]
        Y ([DataFrame]): [label data]
        model_file ([string]): [path where the model is saved]
        force_model_update (bool, optional): [whether to force retraining and overwrite the model]. Defaults to False.
Returns:
        [object]: [trained model]
"""
if os.path.exists(model_file) and (force_model_update==False):
return
else:
x_train, x_vali, y_train, y_vali = train_test_split(X,np.array(Y).ravel(),test_size=vali_ratio,random_state=1)
eval_set = [(x_train, y_train), (x_vali, y_vali)]
model = XGBClassifier(
objective='binary:logistic',
random_state=42,
use_label_encoder=False,
n_jobs=-2,
eval_metric='mlogloss',
max_depth=6,
n_estimators= cfg.TRAIN_HOWMANY_ENZYME_LEARNING_STEPS
)
print(model)
# model.fit(X, Y.ravel())
model.fit(x_train, y_train, eval_metric="logloss", eval_set=eval_set, verbose=True)
joblib.dump(model, model_file)
        print('XGBoost model training finished')
return model
#endregion
#region Build the model that predicts how many functions an enzyme has
def train_howmany_enzyme(data_x, data_y, model_file, vali_ratio=0.3, force_model_update=False):
    """[Build the model that predicts how many functions an enzyme has]
Args:
force_model_update:
vali_ratio:
model_file:
        data_x ([DataFrame]): [X training data]
        data_y ([DataFrame]): [Y training data]
Returns:
        [object]: [trained model]
"""
if os.path.exists(model_file) and (force_model_update==False):
return
else:
x_train, x_vali, y_train, y_vali = train_test_split(data_x,np.array(data_y).ravel(),test_size=vali_ratio,random_state=1)
eval_set = [(x_train, y_train), (x_vali, y_vali)]
model = XGBClassifier(
min_child_weight=6,
max_depth=6,
objective='multi:softmax',
num_class=10,
use_label_encoder=False,
n_estimators=cfg.TRAIN_HOWMANY_ENZYME_LEARNING_STEPS
)
print("-" * 100)
        print("XGBoost model for number of enzyme functions:", model)
model.fit(x_train, y_train, eval_metric="mlogloss", eval_set=eval_set, verbose=False)
        # # print the feature importance ranking
bcommon.importance_features_top(model, x_train, topN=50)
        # save the model
joblib.dump(model, model_file)
return model
#endregion
def make_ec_label(train_label, test_label, file_save, force_model_update=False):
if os.path.exists(file_save) and (force_model_update==False):
        print('ec label dict already exists')
return
ecset = sorted( set(list(train_label) + list(test_label)))
ec_label_dict = {k: v for k, v in zip(ecset, range(len(ecset)))}
np.save(file_save, ec_label_dict)
    print('EC label dictionary saved successfully')
    return ec_label_dict
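# Illustrative example (added): make_ec_label enumerates the sorted union of EC numbers
# seen in the train and test labels; the EC strings and the save path below are arbitrary.
# >>> make_ec_label(['1.1.1.1', '2.7.1.1'], ['1.1.1.1', '3.2.1.4'], '/tmp/ec_label.npy')
# {'1.1.1.1': 0, '2.7.1.1': 1, '3.2.1.4': 2}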
#region Train the Slice model
def train_ec_slice(trainX, trainY, modelPath, force_model_update=False):
    """[Train the Slice model for EC number prediction]
Args:
        trainX ([DataFrame]): [X features]
        trainY ([DataFrame]): [Y labels]
        modelPath ([string]): [directory where the model is stored]
        force_model_update (bool, optional): [whether to force retraining and overwrite the model]. Defaults to False.
"""
if os.path.exists(modelPath+'param') and (force_model_update==False):
        print('model already exists')
return
cmd = ''' ./slice_train {0} {1} {2} -m 100 -c 300 -s 300 -k 700 -o 32 -t 32 -C 1 -f 0.000001 -siter 20 -stype 0 -q 0 '''.format(trainX, trainY, modelPath)
print(cmd)
os.system(cmd)
print('train finished')
#endregion
if __name__ =="__main__":
    # 1. load the data
print('step 1 loading data')
train = | pd.read_feather(cfg.TRAIN_FEATURE) | pandas.read_feather |
import pandas as pd
import folium
import math
from itertools import combinations
from pyproj import Proj, transform
from tqdm import tqdm
from typing import List
def preprocess_data(path: str) -> pd.DataFrame:
    """
    Note: Modify this function to fit your own data, or skip it entirely and do the
    preprocessing directly in "main".
    Explanation
        Takes a data path as input and reduces the data to the columns "name"/"latitude"/"longitude".
    Arguments
        path: Path to an input file in 'xlsx' or 'csv' format.
    Return
        Pandas Data Frame: Data frame with columns in the order "name", "latitude", "longitude"
"""
if "xlsx" in path:
data = pd.read_excel(path)
############ Modify according to input/output ####################
if "대구분명칭" in data.columns:
data = data[["대구분명칭", "위도", "경도"]]
elif "시장명" in data.columns:
data = data[["시장명", "위도", "경도"]]
data.columns = ["name", "latitude", "longitude"]
##########################################################
elif "csv" in path:
data = pd.read_csv(
path, header=None
) # Header options are also modified according to your data.
############ Modify according to input/output ####################
data.columns = ["longitude", "latitude", "name"]
data = data[["name", "latitude", "longitude"]]
##########################################################
return data
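# Usage sketch (added, illustrative): the file names below are made up; the returned
# frame always has the columns "name", "latitude", "longitude".
# facilities = preprocess_data("./data/public_facilities.xlsx")
# floating_population = preprocess_data("./data/floating_population.csv")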
def coordinate_change(data: pd.DataFrame, c1: str, c2: str) -> pd.DataFrame:
"""
Explanation
        Converts the latitude and longitude columns of the data frame from coordinate system c1 to coordinate system c2.
Arguments
data: The columns are in the form of a Pandas Data Frame in the order of "name", "latitude", and "longitude".
c1: The original latitude and longitude coordinate system.
c2: The latitude and longitude coordinate system to convert.
c1 & c2: something like 'epsg:5178', 'epsg:4326', etc..
Return
Pandas Data Frame: Data frame with converted latitude and longitude coordinates.
"""
proj_c1 = Proj(init=c1)
proj_c2 = Proj(init=c2)
for i in tqdm(range(len(data))):
change_long, change_lat = transform(
proj_c1, proj_c2, data["longitude"][i], data["latitude"][i]
)
data["longitude"][i] = change_long
data["latitude"][i] = change_lat
return data
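# Usage sketch (added, illustrative): convert from the source coordinate system (here
# assumed to be EPSG:5178) to WGS84 (EPSG:4326), matching the example codes listed in
# the docstring above; 'facilities' is assumed to come from preprocess_data.
# facilities = coordinate_change(facilities, "epsg:5178", "epsg:4326")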
def shortest_distance(F: pd.DataFrame, L: pd.DataFrame):
"""
Explanation
        Build the pairwise distance matrix between public facilities and floating-population points.
Arguments
F: Coordinates of public facilities.
L: Coordinates of the floating population.
Return
Pandas Data Frame: Shortest distance matrix between public facilities and floating population
"""
F_list = []
L_list = []
for i in range(len(F)):
name = f"F_{i}"
F_list.append(name)
for i in range(len(L)):
name = f"L_{i}"
L_list.append(name)
distance = pd.DataFrame(columns=F_list, index=L_list)
for i in range(len(distance)):
for j, col in enumerate(distance.columns):
square_sum = ((F["latitude"][j] - L["latitude"][i]) ** 2) + (
(F["longitude"][j] - L["longitude"][i]) ** 2
)
dist = math.sqrt(square_sum)
distance[col][i] = dist
return distance
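# Optional vectorized alternative (added sketch, not the code path used here): the same
# Euclidean distance matrix can be computed without the double loop via scipy, with row
# and column labels built as above.
# from scipy.spatial.distance import cdist
# dist_values = cdist(L[["latitude", "longitude"]].values,
#                     F[["latitude", "longitude"]].values)
# distance = pd.DataFrame(dist_values,
#                         index=[f"L_{i}" for i in range(len(L))],
#                         columns=[f"F_{j}" for j in range(len(F))])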
def p_list_set(distance_data: pd.DataFrame, p: int) -> List[List]:
"""
Explanation
        Selects the '2p' public facilities (F) with the smallest total distance to the
        floating-population coordinates, then groups them into p sliding windows of size p.
Args
distance_data: The matrix of distances between F and L. (column = F, row = L)
p: The number of public facilities to be finally selected.
return
p_list_set: A set of p lists tied up in p
ex) candidate= [1,2,3,4,5,6,7,8,9,10] / p=3
p_list_set = [
[1,2,3],
[2,3,4],
[3,4,5]
]
"""
    # For each public facility, the sum of its distances to all floating-population coordinates.
col_sum = list(distance_data.sum(axis=0))
col_sum_tuple = [] # Tie col_sum with index.
for i in range(len(col_sum)):
tup = (i, col_sum[i])
col_sum_tuple.append(tup)
col_sum_tuple.sort(key=lambda x: x[1])
col_sum_tuple = col_sum_tuple[: 2 * p] # Choose the top 2p based on distance.
p_list_set = [col_sum_tuple[i : i + p] for i in range(p)]
return p_list_set
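# Worked example (added): with 2p = 6 shortlisted facility indices ranked by total
# distance, say [4, 0, 7, 2, 9, 5] (a made-up order), and p = 3, the sliding windows are
# [[4, 0, 7], [0, 7, 2], [7, 2, 9]] -- each entry actually being an
# (index, distance_sum) tuple in the code above.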
def candidate_place(
pb: pd.DataFrame, distance: pd.DataFrame, p_list_set: List[List]
) -> List:
"""
Explanation
        From the candidate set whose summed minimum distances are smallest, only the facility names are extracted.
Args:
pb: DataFrame for public facilities.
distance: DataFrame about the distance between F and L
p_list_set: In distance, a set of p lists grouped by p based on distance
Return:
List: List of names of p public facilities.
"""
min_sum_list = [] # take the minimum values in the pth list.
for i in range(len(p_list_set)):
tup_check = []
for j in p_list_set[i]:
tup_check.append(f"F_{j[0]}")
check_df = distance[tup_check]
check_df["min"] = 0 # generate 'min' column
for k in range(len(check_df)):
k_th_row = check_df.iloc[k][:-1] # exclude 'min' column
check_df["min"][k] = min(k_th_row)
min_sum_value = sum(check_df["min"])
min_sum_list.append(min_sum_value)
final_index = min_sum_list.index(min(min_sum_list))
final_set = p_list_set[final_index]
final_set.sort(key=lambda x: x[0])
final_idx = [idx for idx, dist in final_set]
final_market_data = pb.iloc[final_idx, :]
final_market_data.reset_index(drop=True, inplace=True)
name_list = [name for name in final_market_data["name"]]
return name_list
def top_value(char_list: List) -> int:
"""Heuristic Method with P-Median
Explanation
Get the top three to six. (If there is a duplicate value, bring up to six.)
Args:
char_list: A list of the names of the final candidates.
Return:
int: The number of final candidates to get
"""
appearance_candidate = list( | pd.Series(char_list) | pandas.Series |
"""Predict all plots which have NEON field data"""
from deepforest import deepforest
import os
import rasterstats
import geopandas as gp
import pandas as pd
from crown_maps.LIDAR import non_zero_99_quantile
from crown_maps.predict import predict_tiles, project
def run(eval_path, CHM_dir, min_height=3):
#Predict
model = deepforest.deepforest()
model.use_release()
#Load field data
field_data = pd.read_csv("Figures/vst_field_data.csv")
#Load field data locations
site_shp = gp.read_file("Figures/All_NEON_TOS_Plots_V7/All_NEON_TOS_Plot_Polygons_V7.shp")
#Which locations have data
site_shp = site_shp[site_shp.plotID.isin(field_data.plotID.unique())]
site_shp["path"] = site_shp.plotID.apply(lambda x: "{}.tif".format(os.path.join(eval_path,x)))
#Predict each unique tile
tiles_to_predict = site_shp["path"].unique()
results = [ ]
for tile in tiles_to_predict:
try:
result = model.predict_tile(tile,return_plot=False,patch_size=400, iou_threshold=0.1)
result["plot_name"] = os.path.splitext(os.path.basename(tile))[0]
results.append(result)
except Exception as e:
print(e)
continue
boxes = pd.concat(results)
#Create plot name groups
boxes_grouped = boxes.groupby('plot_name')
plot_groups = [boxes_grouped.get_group(x) for x in boxes_grouped.groups]
#Set RGB dir
rgb_dir = os.path.dirname(eval_path)
#Project
threshold_boxes = []
for x in plot_groups:
plot_name = x.plot_name.unique()[0]
#Look up RGB image for projection
image_path = "{}/{}.tif".format(rgb_dir,plot_name)
result = project(image_path,x)
#Extract heights
chm_path = "{}_CHM.tif".format(os.path.join(CHM_dir, plot_name))
try:
height_dict = rasterstats.zonal_stats(result, chm_path, stats="mean", add_stats={'q99':non_zero_99_quantile})
except Exception as e:
print("{} raises {}".format(plot_name,e))
continue
x["height"] = [g["q99"] for g in height_dict]
#Merge back to the original frames
threshold_boxes.append(x)
threshold_boxes = pd.concat(threshold_boxes)
threshold_boxes = threshold_boxes[threshold_boxes.height > min_height]
threshold_boxes["area"] = (threshold_boxes["top"] - threshold_boxes["bottom"]) * (threshold_boxes["right"] - threshold_boxes["left"])
threshold_boxes = threshold_boxes[["plot_name","xmin","ymin","xmax","ymax","score","label","height","area"]]
return threshold_boxes
def create_shapefiles(eval_path, CHM_dir, min_height=3, save_dir="."):
#Predict
    model = deepforest.deepforest()
model.use_release()
#Load field data
field_data = | pd.read_csv("Figures/vst_field_data.csv") | pandas.read_csv |
#!/usr/bin/env python3
import sys
import argparse
import loompy
import numpy as np
import pandas as pd
def main():
description = """This script compares two loom files and checks that they contain identical data up to a constant"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--truth-loom", dest="truth_loom_path", required=True, help="Path to truth loom file", type=str)
parser.add_argument("--check-loom", dest="check_loom_path", required=True, help="Path to loom file to check", type=str)
parser.add_argument("--delta-cutoff", dest="delta_cutoff", required=True, help="Max delta value allowed", type=int)
args = parser.parse_args()
truth_loom = loompy.connect(args.truth_loom_path)
check_loom = loompy.connect(args.check_loom_path)
truth_loom_array = | pd.DataFrame(data=truth_loom[:, :], index=truth_loom.row_attrs['gene_names'], columns=truth_loom.col_attrs['cell_names']) | pandas.DataFrame |
import argparse
import sys
import random
import csv
import ujson
import re
import pandas as pd
import numpy as np
from collections import Counter
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import wordvecdata as wvd
from sklearn.metrics import average_precision_score
from datetime import datetime
COLUMNS = ["node1", "node2", "node3"]
LABEL_COLUMN = "label"
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper parameters. TODO pass this from command line
PATH = "saved_models/best_biogrid_model.pth"
CUR_PATH = "saved_models/cur_biogrid_model.pth"
num_classes = 2
batch_size = 100
learning_rate = 0.00001
frame_link_amt = 50
conv_height = 7
#Torch Dataset class to hold data
class LinksDataset(Dataset):
def __init__(self, features_array, labels_array, transform=torch.from_numpy):
"""
Args:
features_array:
labels_array:
transform:
"""
self.link_features = features_array
self.labels = labels_array
self.transform = transform
def __len__(self):
return len(self.link_features)
def __getitem__(self, idx):
link_features = self.link_features[idx]
label = self.labels[idx]
if self.transform:
link_features = self.transform(link_features)
return link_features, label
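# Usage sketch (added, illustrative): in this script the features and labels come from
# get_input below; the placeholder arrays here only mimic its
# (1, frame_link_amt, embedding_dim) per-example layout, with 300 as an assumed
# embedding size.
# example_features = [np.zeros((1, frame_link_amt, 300), dtype='float32')]
# example_labels = np.array([0])
# loader = DataLoader(dataset=LinksDataset(example_features, example_labels),
#                     batch_size=batch_size, shuffle=True)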
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
def __init__(self, num_classes, conv_height, conv_width=300):
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 128, kernel_size=(conv_height, conv_width), stride=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2,1))
)
self.fc = nn.Linear(2816, 1)
self.softmax = nn.Softplus()
def forward(self, x):
out = self.layer1(x)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
out = self.softmax(out)
return out
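# Shape sketch (added, illustrative): with the default conv_width of 300 and
# conv_height of 7, an input batch of shape (N, 1, frame_link_amt, 300) is reduced by
# the (7, 300) convolution to (N, 128, frame_link_amt - 6, 1), max-pooled over pairs of
# rows, flattened, and mapped by the fully connected layer to a single
# Softplus-activated score per example; the hard-coded fc size of 2816 (128 * 22)
# matches frame_link_amt = 50.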
def get_input(df, embeddings,index_map, combination_method='hadamard', data_purpose='train'):
"""Build model inputs."""
dim_size = embeddings.shape[1]
features = []
# Converts the label column into a constant Tensor.
label_values = df[LABEL_COLUMN].values
assert(combination_method in ['hadamard','average', 'weighted_l1', 'weighted_l2', 'concatenate']), "Invalid combination Method %s" % combination_method
padding = np.array([0.0] * dim_size, dtype='float32')
if data_purpose != 'eval':
print("Combining with {}.".format(combination_method))
total_real_links = 0
max_real_links = 0
min_real_links = 10000
total_rows = 0
c_lst = [] #ordered list of the Cs (for evaluation)
c_lst_dict = {} #dict to support fast searching of above. TODO: investigate ordered dicts
col_keys = []
for ind, row in enumerate(df.itertuples()):
input_str = row.train_nodes
conv_rows = input_str.split('-')
c = conv_rows[0].split('::')[2]
if c not in c_lst_dict:
c_lst.append(c)
c_lst_dict[c] = 1
conv_rows_cnt = len(conv_rows)
total_real_links += conv_rows_cnt
if conv_rows_cnt > max_real_links:
max_real_links = conv_rows_cnt
if conv_rows_cnt < min_real_links:
min_real_links = conv_rows_cnt
if conv_rows_cnt < frame_link_amt:
needed = frame_link_amt - len(conv_rows)
for i in range(needed):
if i % 2 == 0:
conv_rows.append('PAD::PAD::PAD')
else:
conv_rows = ['PAD::PAD::PAD'] + conv_rows
frame = {'node1':[], 'node2':[], 'node3':[],}
for crow in conv_rows:
node1, node2, node3 = crow.split('::')
frame['node1'].append(node1)
frame['node2'].append(node2)
frame['node3'].append(node3)
instance_df = pd.DataFrame(frame)
feature_cols = {}
column_tensors = []
for i in COLUMNS:
#Ability to weight columns and increase numerical values (esp. for LINE)
if i == 'node1':
col_weight = 10.0
elif i == 'node2':
col_weight = 10.0
elif i == 'node3':
col_weight = 10.0
words = [value for value in instance_df[i].values]
            col_keys.append([w_ for w_ in words if w_ != 'PAD'])
ids = [index_map[word] if word != 'PAD' else -1 for word in words]
column_tensors.append([np.multiply(np.array(embeddings[id_]), col_weight) if id_ != -1 else padding for id_ in ids])
instance_features = np.array(column_tensors[0])
no_output = ['map']
for i in range(1, len(column_tensors)):
if combination_method == 'hadamard':
instance_features = np.multiply(instance_features, column_tensors[i])
elif combination_method == 'average':
instance_features = np.mean(np.array([ instance_features, column_tensors[i] ]), axis=0)
elif combination_method == 'weighted_l1':
instance_features = np.absolute(np.subtract(instance_features, column_tensors[i]))
elif combination_method == 'weighted_l2':
instance_features = np.square(np.absolute(np.subtract(instance_features, column_tensors[i])))
elif combination_method == 'concatenate':
instance_features = np.concatenate([instance_features, column_tensors[i]], 1)
#Combine all feature vectors into the conv window size instead of simply truncating
updated_instance_features = instance_features[:frame_link_amt]
start_row = frame_link_amt
end_row = frame_link_amt * 2
while end_row <= len(conv_rows):
updated_instance_features = np.add(updated_instance_features, instance_features[start_row:end_row])
start_row = end_row
end_row += frame_link_amt
#sum the remaining rows
#TODO: can perhaps be done more elegantly with modulus of len(conv_rows) and frame_link_amt
if start_row < len(conv_rows):
padding_needed = end_row - len(conv_rows)
padded_instance_features = np.concatenate([instance_features[start_row:], np.ones((padding_needed, instance_features.shape[1]),dtype='float32')])
updated_instance_features = np.add(updated_instance_features, padded_instance_features)
features.append(np.expand_dims(updated_instance_features, axis=0))
total_rows = ind
if data_purpose != 'eval':
print("\nReal links in conv window stats: Range from {}-{} with a mean of {}.".format(min_real_links, max_real_links,
total_real_links/max(total_rows, 1)))
if data_purpose in ['test', 'eval']:
return features, np.array([1 if val > 0 else 0 for val in label_values if val != -1]), label_values, c_lst
return features, np.array([val * 100 for val in label_values if val != -1])
def label_func(x, positive_labels):
for ind, label in enumerate(positive_labels, 1):
if label in x:
return ind
return 0
def build_model(conv_width):
"""Build model."""
model = ConvNet(num_classes, conv_height, conv_width).to(device)
# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
return model, criterion, optimizer
def train_and_eval(train_epochs, train_data, devel_data, test_data, train_embeddings_file_name, test_embeddings_file_name,
devel_filename, eval_filename, devel_unformed_filename, unformed_filename, positive_labels, combination_method, method,
c_lst, lbd_type, experiment_name, all_data, neighbours_data, cutoff_year):
"""Train and evaluate the model."""
index_map, weights = wvd.load(test_embeddings_file_name)
#Get positive labels
positive_labels = positive_labels.split(',')
print("reading training data...")
train_file_name = train_data
df_train = pd.read_table(train_file_name, dtype={'train_nodes':str})
df_train = df_train.sample(frac=1)
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
#Get inputs
train_x, labels = get_input(df_train, weights, index_map, combination_method)
train_loader = torch.utils.data.DataLoader(dataset=LinksDataset(train_x, labels), batch_size=batch_size, shuffle=True)
pos_labels = [l_ for l_ in labels if l_ != 0]
#Start Loading devel set
print("reading devel data...")
devel_file_name = devel_data
df_devel = pd.read_table(devel_file_name, dtype={'train_nodes':str})
df_devel = df_devel.sample(frac=1)
# remove NaN elements
df_devel = df_devel.dropna(how='any', axis=0)
#Get inputs
devel_x, dev_labels = get_input(df_devel, weights, index_map, combination_method)
devel_loader = torch.utils.data.DataLoader(dataset=LinksDataset(devel_x, dev_labels), batch_size=batch_size, shuffle=False)
dev_pos_labels = [l_ for l_ in dev_labels if l_ != 0]
#End Loading devel set
#Start prepping dev data
    #Initial testing will select 1st 1000 egs in devel/test data from shuffled file (gives randomness that is easily replicated across all methods). Read these lines
with open(devel_filename) as inp_file:
chosen_As = {}
for ind, line in enumerate(csv.reader(inp_file, delimiter='\t')): #quoting=csv.QUOTE_NONE - If req to make data work, examine data
a = line[0].replace(' ', '_').replace('-', '_')
cs = line[1].split(';')
chosen_As[a] = 1
if ind >= 999:
break
print("In dev, there were {} chosen As".format(len(chosen_As)))
a_dfs = {}
for a in chosen_As.keys():
a_dfs[a] = {'train_nodes': [], 'label':[]}
#Filter test for chosen As which formed (True positives)
formed_c = {}
for ind, row in enumerate(df_devel.itertuples()):
input_str = row.train_nodes
conv_rows = input_str.split('-')
link_lst = conv_rows[0].split('::')
a = link_lst[0]
c = link_lst[2]
if a in chosen_As.keys():
a_dfs[a]['train_nodes'].append(row.train_nodes)
a_dfs[a]['label'].append(row.label)
formed_c["{}::{}".format(a,c)] = 1
#Add unformed edges for the chosen As (True negatives)
a_c_regex = r"'(.*?)'"
unformed_edges = 0
if 'json' in devel_unformed_filename:
with open(devel_unformed_filename) as uf:
data = ujson.loads(uf.read())
for ac, b_lst in data.iteritems():
ac_extract = re.findall(a_c_regex, ac)
a = ac_extract[0].replace(' ', '_').replace('-', '_')
c = ac_extract[1].replace(' ', '_').replace('-', '_')
if a in chosen_As.keys():
if "{}::{}".format(a,c) not in formed_c:
conv_frame = ""
for ind, b in enumerate(b_lst):
conv_frame += "{}::{}::{}".format(a, b.replace(' ', '_').replace('-', '_'), c)
if ind < len(b_lst) - 1:
conv_frame += "-"
a_dfs[a]['train_nodes'].append(conv_frame)
a_dfs[a]['label'].append(0)
unformed_edges += 1
print("There were {} unformed edges added.".format(unformed_edges))
for a, a_dict in a_dfs.iteritems():
a_dfs[a] = pd.DataFrame(a_dict)
#End prepping dev data
print("\nBuilding model...")
feature_dim = train_x[0].shape[2]
model, criterion, optimizer = build_model(feature_dim)
# Train the model
print("\nTraining model...")
total_step = len(train_loader)
best_info = {'max_mmrr':0}
evaluate_every = 25
for epoch in range(train_epochs):
for i, (train_x, labels) in enumerate(train_loader):
labels = labels.type(torch.FloatTensor)
labels = labels.view(-1, 1)
links = train_x.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(links)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 500 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, train_epochs, i+1, total_step, loss.item()))
if (epoch + 1) % evaluate_every == 0:
###Start Evaluate on the dev set
print("Evaluating on devel...")
map_output = ""
mrr_total = 0.0
#Save the current model
torch.save(model, CUR_PATH)
#Load the last saved best model
lmodel = torch.load(CUR_PATH)
# Test the model
lmodel.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
total_avg_score_ties = 0.0
total_tied_golds = 0
total_untied_golds = 0
tie_lst = []
for a_ind, (a, df_test_a) in enumerate(a_dfs.iteritems()):
test_x, test_labels, original_labels, c_lst = get_input(df_test_a, weights, index_map, combination_method, data_purpose='eval')
test_loader = torch.utils.data.DataLoader(dataset=LinksDataset(test_x, test_labels), batch_size=batch_size, shuffle=False)
with torch.no_grad():
predictions = []
labels = []
pred_lab_lst = []
inp_pred_lst = []
for test_links, test_labels in test_loader:
test_links = test_links.to(device)
outputs = lmodel(test_links)
predicted, _ = torch.max(outputs.data, 1)
predictions.extend([tensor.item() for tensor in predicted])
labels.extend([tensor.item() for tensor in test_labels])
for pred, lab in zip(predictions, labels):
pred_lab_lst.append((pred, lab))
sorted_pred_lab_lst = sorted(pred_lab_lst, key=lambda x: x[0], reverse=True)
y_true = [tup[1] for tup in sorted_pred_lab_lst] #gold
y_scores = [tup[0] for tup in sorted_pred_lab_lst] #predictions
true_inds = [ind for ind in range(len(y_true)) if y_true[ind] == 1]
true_scores = [y_scores[ind] for ind in true_inds]
sorted_scores = sorted(y_scores, reverse=True)
true_ranks = []
scores_cnter = Counter(y_scores)
true_c_scores = [s_ for s_ in scores_cnter.keys() if s_ in true_scores]
avg_score_ties = sum(true_c_scores)/float(len(true_c_scores))
total_avg_score_ties += avg_score_ties
for tc, ts in zip(c_lst, true_scores):
if scores_cnter[ts] == 1:
true_ranks.append((sorted_scores.index(ts) + 1, ts, tc))
total_untied_golds += 1
else:
tie_lst.append((tc, ts, scores_cnter[ts]))
total_tied_golds += 1
ts_index = sorted_scores.index(ts)
#true_ranks.append((random.randint(ts_index + 1, ts_index + scores_cnter[ts]), ts))
#Get median of range
true_ranks.append((( ((ts_index + 1) + (ts_index + scores_cnter[ts]))/2 ), ts))
mrr = np.mean([1.0/tr_[0] for tr_ in true_ranks])
mrr_total += mrr
tp = len([x for x in y_true if x > 0.0])
if a_ind % 500 == 0:
print("{} devel completed at {}.".format(a_ind, datetime.now()))
mean_mrr = mrr_total/len(a_dfs.keys())
print("Total tied gold ranks: {}".format(total_tied_golds))
print("Total untied gold ranks: {}".format(total_untied_golds))
map_o = "MRR: {}.".format(mean_mrr)
map_output = "{}\n{}\n\n{}".format(experiment_name, map_o, map_output)
print(map_o)
if mean_mrr > best_info['max_mmrr']:
print("Saving because {} > {}".format(mean_mrr, best_info['max_mmrr']))
torch.save(model, PATH)
best_info['experiment_name'] = experiment_name
best_info['max_mmrr'] = mean_mrr
best_info['loss_at_best'] = loss.item()
best_info['epoch'] = epoch + 1
best_info['tied_untied'] = "{}/{}".format(total_tied_golds, total_untied_golds)
###End Evaluate on the dev set
print("\nTrain complete. Best info: {}".format(best_info))
train_x = None
train_loader = None
print("\nTesting model...")
index_map, weights = wvd.load(test_embeddings_file_name)
print("reading data...")
test_file_name = test_data
df_test = pd.read_table(test_file_name, dtype={'train_nodes':str})
# remove NaN elements
df_test = df_test.dropna(how='any', axis=0)
    #Initial testing will select 1st 1000 egs in devel/test data instead of random selection. Read these lines
with open(eval_filename) as inp_file:
chosen_As = {}
for ind, line in enumerate(csv.reader(inp_file, delimiter='\t')): #quoting=csv.QUOTE_NONE - If req to make data work, examine data
a = line[0].replace(' ', '_').replace('-', '_')
cs = line[1].split(';')
chosen_As[a] = 1
if ind >= 999:
break
print("There were {} chosen As".format(len(chosen_As)))
a_dfs = {}
for a in chosen_As.keys():
a_dfs[a] = {'train_nodes': [], 'label':[]}
#Filter test for chosen As which formed (True positives)
formed_c = {}
for ind, row in enumerate(df_test.itertuples()):
input_str = row.train_nodes
conv_rows = input_str.split('-')
link_lst = conv_rows[0].split('::')
a = link_lst[0]
c = link_lst[2]
if a in chosen_As.keys():
a_dfs[a]['train_nodes'].append(row.train_nodes)
a_dfs[a]['label'].append(row.label)
formed_c["{}::{}".format(a,c)] = 1
#Add unformed edges for the chosen As (True negatives)
a_c_regex = r"'(.*?)'"
unformed_edges = 0
if 'json' in unformed_filename:
with open(unformed_filename) as uf:
data = ujson.loads(uf.read())
for ac, b_lst in data.iteritems():
ac_extract = re.findall(a_c_regex, ac)
a = ac_extract[0].replace(' ', '_').replace('-', '_')
c = ac_extract[1].replace(' ', '_').replace('-', '_')
if a in chosen_As.keys():
if "{}::{}".format(a,c) not in formed_c:
conv_frame = ""
for ind, b in enumerate(b_lst):
conv_frame += "{}::{}::{}".format(a, b.replace(' ', '_').replace('-', '_'), c)
if ind < len(b_lst) - 1:
conv_frame += "-"
a_dfs[a]['train_nodes'].append(conv_frame)
a_dfs[a]['label'].append(0)
unformed_edges += 1
print("There were {} unformed edges added.".format(unformed_edges))
for a, a_dict in a_dfs.iteritems():
a_dfs[a] = | pd.DataFrame(a_dict) | pandas.DataFrame |
import ast
import collections
import datetime
import math
import numpy as np
import pandas as pd
from pyspark import Row, SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.functions import col
class Calculator:
def __init__(self):
self.localClusterURL = "local[2]"
self.clusterMasterURL = "spark://Spark:7077"
self.conf = SparkConf().setAppName('Movie_System').setMaster(self.clusterMasterURL)
self.sc = SparkContext.getOrCreate(self.conf)
self.sqlContext = SQLContext(self.sc)
        # Spark initialization
# self.sqlContext = SparkSession.Builder().appName('sql').master('spark://Spark:7077').getOrCreate()
        # MySQL configuration
self.prop = {'user': 'sql_bs_sju_site',
'password': '<PASSWORD>',
'driver': 'com.mysql.cj.jdbc.Driver'}
self.jdbcURL = "jdbc:mysql://127.0.0.1:3306/sql_bs_sju_site" \
"?useUnicode=true&characterEncoding=utf-8&useSSL=false"
        # HDFS location of the user/rating/links/tags data ===> i.e. where the raw inputs for recommendation are archived
self.hdfs_data_path = 'hdfs://Spark:9000/movie_system/'
self.date_time = datetime.datetime.now().strftime("%Y-%m-%d")
def __del__(self):
        # stop the Spark session
self.sc.stop()
del self.sc
def select(self, sql):
        # read a table from MySQL
data = self.sqlContext.read.jdbc(url=self.jdbcURL, table=sql, properties=self.prop)
return data
    # # during offline computation, load the movies data from MySQL into Hive
# movies_sql = self.sqlContext.read.format('jdbc') \
# .options(url=self.jdbcURL,
# driver=self.prop['dirver'],
# dbtable=self.movieTab,
# user=self.prop['user'],
# password=self.prop['password']).load()
def get_data(self, path):
data = self.sqlContext.read.parquet(path)
return data
# def show(self):
# sql = '(select user_id,tag_type,tag_weight,tag_name from user_usertag) aaa'
# data = self.select(sql)
    #     # print the type of data: <class 'pyspark.sql.dataframe.DataFrame'>
# # print(type(data))
    #     # show the data
# data.show()
def write(self, data, path):
data.write.csv(path + "_csv", header=True, sep=",", mode='overwrite')
data.write.parquet(path, mode='overwrite')
def change_sql_data_to_hdfs(self, sql, path):
data = self.select(sql)
self.write(data, path)
    # compute movie similarity from genre, language, country and year
def calculator_movie_type(self, read_path, write_path):
dfMovies = self.get_data(read_path)
        dfMovies.show()
        """Compute the Cartesian product of the two RDDs"""
rddMovieCartesianed = dfMovies.rdd.cartesian(dfMovies.rdd)
rddMovieIdAndGenre = rddMovieCartesianed.map(lambda line: Row(movie1=line[0]['movie_id'],
movie2=line[1]['movie_id'],
sim=countSimBetweenTwoMovie(line[0], line[1])))
dfFinal = self.sqlContext.createDataFrame(rddMovieIdAndGenre)
dfFinal.show()
self.write(dfFinal, write_path)
    # compute user similarity from preferences, hobbies, age and city
def calculator_user_base(self, read_path, write_path):
dfUsers = self.get_data(read_path)
        dfUsers.show()
        """Compute the Cartesian product of the two RDDs"""
rddUserCartesianed = dfUsers.rdd.cartesian(dfUsers.rdd)
rddUserIdAndGenre = rddUserCartesianed.map(lambda line: Row(user1=line[0]['user_id'],
user2=line[1]['user_id'],
sim=countSimBetweenTwoUser(line[0], line[1])))
dfFinal = self.sqlContext.createDataFrame(rddUserIdAndGenre)
dfFinal.show()
self.write(dfFinal, write_path)
    # compute user similarity from user tags
def calculator_user_tag(self, read_path, write_path):
dfUsers = self.get_data(read_path)
# dfUsers.show()
# print(change_user_tag_data(dfUsers.toPandas()))
dfUsers = self.sqlContext.createDataFrame(change_user_tag_data(dfUsers.toPandas()))
        # dfUsers.show()
        # """Compute the Cartesian product of the two RDDs"""
rddUserCartesianed = dfUsers.rdd.cartesian(dfUsers.rdd)
rddUserIdAndGenre = rddUserCartesianed.map(lambda line: Row(user1=line[0]['user_id'],
user2=line[1]['user_id'],
sim=countSimBetweenTwoUserByTag(line[0], line[1])))
dfFinal = self.sqlContext.createDataFrame(rddUserIdAndGenre)
# dfFinal.show()
self.write(dfFinal, write_path)
@staticmethod
def change_dataframe_to_li(data_frame, li_name):
data_frame = data_frame.toPandas()
data_li = np.array(data_frame[li_name])
data_li = data_li.tolist()
return data_li
    # find similar movies
def select_movie_to_movie(self, df_sim_movie, movie_id):
min_sim = 0.5
max_num = 300
df_sim_movie = df_sim_movie.orderBy('sim', ascending=0)\
.where(
(col('movie1') == movie_id) &
(col('sim').__ge__(min_sim)) &
(col('movie1') != col('movie2')))\
.limit(max_num).select('movie2')
df_sim_movie_li = self.change_dataframe_to_li(df_sim_movie, "movie2")
return df_sim_movie_li
    # find similar users
def select_user_to_user(self, df_sim_user, user_id):
min_sim = 0.5
max_num = 300
df_sim_user = df_sim_user.orderBy('sim', ascending=0)\
.where(
(col('user1') == user_id) &
(col('sim').__ge__(min_sim)) &
(col('user1') != col('user2')))\
.limit(max_num).select('user2')
df_sim_user_li = self.change_dataframe_to_li(df_sim_user, "user2")
return df_sim_user_li
    # find movies the user has favourited, commented on, or rated highly
def select_user_movie(self, df_users, user_id):
min_score = 3
max_num = 300
df_movies = df_users.orderBy('tag_weight', ascending=0) \
.where((
(col('tag_type') == 'like_movie_id') |
(col('tag_type') == 'rating_movie_id') |
(col('tag_type') == 'comment_movie_id')
# (col('tag_type') == 'rating_movie_id')
) & (col('tag_weight').__ge__(min_score)) & (col('user_id').__eq__(user_id))) \
.limit(max_num).select('tag_name').distinct()
movie_li = self.change_dataframe_to_li(df_movies, "tag_name")
# movie_li = np.array(df_movies.toPandas()["tag_name"])
# movie_li = movie_li.tolist()
return movie_li
def calculator_user_movie_recommend(self, user_path, tag_path, movie_sim_path, user_sim_path, calculator_type):
finial_rs = list()
        # load the user_id column from the user info data
df_users = self.get_data(user_path).select('user_id')
df_users.show()
        # load the user tag data
df_tag_users = self.get_data(tag_path)
df_tag_users.show()
        # load the movie similarity data
df_movie_sim = self.get_data(movie_sim_path)
df_movie_sim.show()
        # load the user similarity data
df_user_sim = self.get_data(user_sim_path)
df_user_sim.show()
user_id_li = self.change_dataframe_to_li(df_users, "user_id")
        if calculator_type == "movie":  # recommend movies via similar movies
for id, user_id in enumerate(list(user_id_li)):
user_movie_li = self.select_user_movie(df_tag_users, user_id)
user_movie_li_rs = list()
for user_movie_id in list(user_movie_li):
select_movie_to_movie_li = list(self.select_movie_to_movie(df_movie_sim, user_movie_id))
                    user_movie_li_rs = list(set(select_movie_to_movie_li + user_movie_li_rs))  # merge the lists
                user_movie_li_rs = ",".join(map(str, user_movie_li_rs))  # join into a comma-separated string
finial_rs.append([id, user_id, user_movie_li_rs, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')])
        elif calculator_type == "user":  # recommend movies via similar users
for id, user_id in enumerate(list(user_id_li)):
user_id_li_rs = self.select_user_to_user(df_user_sim, user_id)
user_movie_li_rs = list()
for user_id_rs in list(user_id_li_rs):
select_user_to_movie_li = list(self.select_user_movie(df_tag_users, user_id_rs))
                    user_movie_li_rs = list(set(select_user_to_movie_li + user_movie_li_rs))  # merge the lists
                user_movie_li_rs = ",".join(map(str, user_movie_li_rs))  # join into a comma-separated string
finial_rs.append([id, user_id, user_movie_li_rs, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')])
# print(finial_rs)
        # convert the results to a Spark DataFrame
recommend = self.sqlContext.createDataFrame(finial_rs, ["id", "user_id", "movie_id_li", "create_time"])
        # write to the database
recommend.write.jdbc(url=self.jdbcURL, table='user_usermovierecommend', mode='overwrite', properties=self.prop)
    # export data from the database and write it to HDFS as parquet files
def step_1(self):
sql1 = '(select user_id,tag_type,tag_weight,tag_name from user_usertag) user_tag_base'
path1 = self.hdfs_data_path + 'user_tag_base_'+self.date_time
self.change_sql_data_to_hdfs(sql1, path1)
        # limit the row count while testing to keep the computation small
sql2 = '(SELECT `movie_id`,`title`,`rating`,`genres`,`countries`,`languages`,`year` FROM ' \
'movie_collectmoviedb limit 0,50) movie_base'
path2 = self.hdfs_data_path + 'movie_base_'+self.date_time
self.change_sql_data_to_hdfs(sql2, path2)
        # user info
sql3 = '(select d.id user_id, b.user_gender gender, d.user_age age, d.user_prefer prefers, ' \
'd.user_hobbies hobbies, d.user_province province, d.user_city city, d.user_district district ' \
'from user_usersbase b join user_usersdetail d on b.id = d.user_id_id where b.user_status = 1) user_base'
path3 = self.hdfs_data_path + 'user_base_'+self.date_time
self.change_sql_data_to_hdfs(sql3, path3)
    # read the parquet files, then compute the similarities
def step_2(self):
read_path1 = self.hdfs_data_path + 'user_tag_base_'+self.date_time
write_path1 = self.hdfs_data_path + 'user_tag_simContent_' + self.date_time
read_path2 = self.hdfs_data_path + 'movie_base_' + self.date_time
write_path2 = self.hdfs_data_path + 'movie_simContent_' + self.date_time
read_path3 = self.hdfs_data_path + 'user_base_'+self.date_time
write_path3 = self.hdfs_data_path + 'user_simContent_' + self.date_time
self.calculator_movie_type(read_path2, write_path2)
self.calculator_user_base(read_path3, write_path3)
self.calculator_user_tag(read_path1, write_path1)
    # read the parquet similarity data, generate the recommendations, and store them in MySQL and HDFS
def step_3(self):
user_base_path = self.hdfs_data_path + 'user_base_'+self.date_time
user_tag_base_path = self.hdfs_data_path + 'user_tag_base_'+self.date_time
movie_sim_path = self.hdfs_data_path + 'movie_simContent_' + self.date_time
user_sim_path = self.hdfs_data_path + 'user_tag_simContent_'+self.date_time
        # recommend based on similar users
# self.calculator_user_movie_recommend(user_base_path, user_tag_base_path,
# movie_sim_path, user_sim_path, "user")
        # recommend based on similar movies
self.calculator_user_movie_recommend(user_base_path, user_tag_base_path,
movie_sim_path, user_sim_path, "movie")
def countIntersectionForTwoSets(list1, list2):
    """Compute the size of the intersection of two sets (lists)"""
count = 0
for i in range(len(list1)):
m = list1[i]
for j in range(len(list2)):
if list2[j] == m:
count = count + 1
break
return count
def countSimBetweenTwoList(list1, list2):
s1 = len(list1)
s2 = len(list2)
m = math.sqrt(s1 * s2)
return countIntersectionForTwoSets(list1, list2) / m
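# Worked example (added): the score is |intersection(A, B)| / sqrt(|A| * |B|), a
# cosine-style overlap on set membership. For ['drama', 'war'] vs ['drama', 'romance']
# it is 1 / sqrt(2 * 2) = 0.5.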
def countSimBetweenTwoMovie(list1, list2):
    """Compute the similarity between two movies"""
movie_type_list1 = ast.literal_eval(list1['genres'])
movie_type_list2 = ast.literal_eval(list2['genres'])
movie_country_list1 = ast.literal_eval(list1['countries'])
movie_country_list2 = ast.literal_eval(list2['countries'])
movie_language_list1 = ast.literal_eval(list1['languages'])
movie_language_list2 = ast.literal_eval(list2['languages'])
movie_year1 = list1['year']
movie_year2 = list2['year']
movie_year = 1 if movie_year1 == movie_year2 else 0
movie_type = countSimBetweenTwoList(movie_type_list1, movie_type_list2)
movie_country = countSimBetweenTwoList(movie_country_list1, movie_country_list2)
movie_language = countSimBetweenTwoList(movie_language_list1, movie_language_list2)
sim = (movie_type * 5 + movie_country * 2 + movie_language * 2 + movie_year * 1) / 10
return sim
def countSimBetweenTwoUser(list1, list2):
    """Compute the similarity between two users"""
user_prefer_list1 = list1['prefers'].split(",")
user_prefer_list2 = list2['prefers'].split(",")
user_hobbie_list1 = list2['hobbies'].split(",")
user_hobbie_list2 = list2['hobbies'].split(",")
user_gender = 1 if list1['gender'] == list2['gender'] else 0
user_province = 1 if list1['province'] == list2['province'] else 0
user_city = 1 if list1['city'] == list2['city'] else 0
user_district = 1 if list1['district'] == list2['district'] else 0
user_prefer = countSimBetweenTwoList(user_prefer_list1, user_prefer_list2)
user_hobbie = countSimBetweenTwoList(user_hobbie_list1, user_hobbie_list2)
sim = (user_prefer * 5 + user_hobbie * 2 + user_gender * 1 + user_province * 1 +
user_city * 0.5 + user_district * 0.5) / 10
return sim
def countSimBetweenTwoDict(info_movie_tag1_dict, info_movie_tag2_dict):
if not info_movie_tag1_dict and not info_movie_tag2_dict or info_movie_tag1_dict == info_movie_tag2_dict:
return 1
key_li1 = list(info_movie_tag1_dict.keys())
key_li2 = list(info_movie_tag2_dict.keys())
content_sim = countSimBetweenTwoList(key_li1, key_li2)
key_score = 0
if key_li1 and key_li2 and key_li1[0] == key_li2[0]:
key_score += 5
if len(key_li1) >= 2 and len(key_li2) >= 2 and key_li1[1] == key_li2[1]:
key_score += 3
if len(key_li1) >= 3 and len(key_li2) >= 3 and key_li1[2] == key_li2[2]:
key_score += 2
key_score = (content_sim + key_score / 10) / 2
return key_score
def countSimBetweenTwoUserByTag(list1, list2):
    """Compute the similarity between two users based on their tags"""
    # scalar tags: info_age, info_city, info_phone_city, info_province, info_sex
    # dict tags: info_movie_tag, info_movie_type
    # list tags: like_movie_id, info_hobbies, rating_movie_id
user_tag_list1 = ast.literal_eval(list1['user_data'])
user_tag_list2 = ast.literal_eval(list2['user_data'])
info_age1 = user_tag_list1.get("user_info").get("info_age")
info_city1 = user_tag_list1.get("user_info").get("info_city")
info_phone_city1 = user_tag_list1.get("user_info").get("info_phone_city")
info_province1 = user_tag_list1.get("user_info").get("info_province")
info_sex1 = user_tag_list1.get("user_info").get("info_sex")
info_movie_tag1_dict = user_tag_list1.get("info_movie_tag")
info_movie_type1_dict = user_tag_list1.get("info_movie_type")
like_movie_id1_li = user_tag_list1.get("like_movie_id")
info_hobbies1_li = user_tag_list1.get("info_hobbies")
rating_movie_id1_li = user_tag_list1.get("rating_movie_id")
info_age2 = user_tag_list2.get("user_info").get("info_age")
info_city2 = user_tag_list2.get("user_info").get("info_city")
info_phone_city2 = user_tag_list2.get("user_info").get("info_phone_city")
info_province2 = user_tag_list2.get("user_info").get("info_province")
info_sex2 = user_tag_list2.get("user_info").get("info_sex")
info_movie_tag2_dict = user_tag_list2.get("info_movie_tag")
info_movie_type2_dict = user_tag_list2.get("info_movie_type")
like_movie_id2_li = user_tag_list2.get("like_movie_id")
info_hobbies2_li = user_tag_list2.get("info_hobbies")
rating_movie_id2_li = user_tag_list2.get("rating_movie_id")
if (info_age1 and info_age2 and int(info_age1) in range(int(info_age2)-3, int(info_age2)+3)) or \
(not info_age1 and not info_age2):
info_age = 1
else:
info_age = 0
info_city = is_exist_and_equal(info_city1, info_city2)
info_sex = is_exist_and_equal(info_sex1, info_sex2)
info_phone_city = is_exist_and_equal(info_phone_city1, info_phone_city2)
info_province = is_exist_and_equal(info_province1, info_province2)
like_movie_id_li = is_exist_and_equal_li(like_movie_id1_li, like_movie_id2_li)
info_hobbies_li = is_exist_and_equal_li(info_hobbies1_li, info_hobbies2_li)
rating_movie_id_li = is_exist_and_equal_li(rating_movie_id1_li, rating_movie_id2_li)
if (info_movie_tag1_dict and info_movie_tag2_dict) or (not info_movie_tag1_dict and not info_movie_tag2_dict):
info_movie_tag_dict = countSimBetweenTwoDict(info_movie_tag1_dict, info_movie_tag2_dict)
else:
info_movie_tag_dict = 0
if (info_movie_type1_dict and info_movie_type2_dict) or (not info_movie_type1_dict and not info_movie_type2_dict):
info_movie_type_dict = countSimBetweenTwoDict(info_movie_type1_dict, info_movie_type2_dict)
else:
info_movie_type_dict = 0
sim = (info_movie_tag_dict * 2 + info_movie_type_dict * 2 + like_movie_id_li * 1 + info_hobbies_li * 1 +
rating_movie_id_li * 1 + info_age * 1 + info_city * 0.5 + info_sex * 0.5 + info_phone_city * 0.5 +
info_province * 0.5) / 10
return sim
def is_exist_and_equal_li(str1, str2):
if not str1 and not str2:
return 1
if str1 and str2:
return countSimBetweenTwoList(str1, str2)
else:
return 0
def is_exist_and_equal(str1, str2):
# if (str1 and str2 and str2 == str1) or (not str1 and not str2):
if str2 == str1:
return 1
else:
return 0
# format the per-user tag data into nested dicts/lists
def change_user_tag_data(data):
list_tag = ["like_movie_id", "info_hobbies", "rating_movie_id"]
dict_tag = ["info_movie_tag", "info_movie_type"]
all_user_data_rs = list()
for user_data in data.groupby(["user_id"]):
user_data_rs = dict()
user_id = user_data[0]
user_data_rs["user_id"] = user_id
user_tag_rs = dict()
user_info_dict = dict()
for user_tag_data in pd.DataFrame(user_data[1]).drop("user_id", axis=1).groupby(["tag_type"]):
if user_tag_data[0] in dict_tag:
# print(user_data[1])
info_movie_tag = pd.DataFrame(user_tag_data[1]).drop(["tag_type"], axis=1).sort_values("tag_weight",
ascending=False)
info_movie_tag = collections.OrderedDict(zip(info_movie_tag["tag_name"], info_movie_tag["tag_weight"]))
user_tag_rs[user_tag_data[0]] = dict(info_movie_tag)
# print(dict(info_movie_tag))
elif user_tag_data[0] in list_tag:
# print(user_data[1])
like_movie_id = pd.DataFrame(user_tag_data[1]).drop(["tag_type"], axis=1).sort_values("tag_weight",
ascending=False)
like_movie_id = list(like_movie_id["tag_name"])
user_tag_rs[user_tag_data[0]] = like_movie_id
# print(like_movie_id)
else:
user_info = | pd.DataFrame(user_tag_data[1]) | pandas.DataFrame |
import pytest
from grasping_position_inference.training.exceptions import DataSetIsEmpty, ModelIsNotTrained
from grasping_position_inference.training.model import Model
import pandas as pd
from mock import patch
DUMMY_FILENAME = 'cup.n.01,BACK,:BACK :BOTTOM,pr2_left_arm.csv'
@patch('grasping_position_inference.training.model.Model._read_data', return_value= | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
##External base packages.
import time
import glob
import os
import pdb
import sys
##External packages.
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from numpy_sugar.linalg import economic_qs, economic_svd
from limix.stats import effsizes_se, lrt_pvalues
from glimix_core.lmm import LMM
from bgen_reader import read_bgen
#Internal code.
import qtl_output
import qtl_loader_utils
import qtl_parse_args
import qtl_utilities as utils
from qtl_snp_qc import do_snp_qc
#V0.1.4
def run_PrsQtl_analysis(pheno_filename, anno_filename, prsFile, output_dir, min_call_rate=0.95, blocksize=1000,
skipAutosomeFiltering = False, gaussianize_method=None, minimum_test_samples= 10, seed=np.random.randint(40000), n_perm=0, write_permutations = False, relatedness_score=None, feature_variant_covariate_filename = None, snps_filename=None, feature_filename=None, snp_feature_filename=None, genetic_range='all',
covariates_filename=None, kinship_filename=None, sample_mapping_filename=None, regressCovariatesUpfront = False):
fill_NaN = Imputer(missing_values=np.nan, strategy='mean', axis=0)
    print('Running GRS QTL analysis.')
lik = 'normal'
'''Core function to take input and run QTL tests on a given chromosome.'''
if relatedness_score is not None:
relatedness_score = float(relatedness_score)
[phenotype_df, kinship_df, covariate_df, sample2individual_df, annotation_df, snp_filter_df, snp_feature_filter_df, geneticaly_unique_individuals, minimum_test_samples, feature_list, risk_df, chromosome, selectionStart, selectionEnd, feature_variant_covariate_df]=\
utils.run_PrsQtl_analysis_load_intersect_phenotype_covariates_kinship_sample_mapping(pheno_filename=pheno_filename, anno_filename=anno_filename, prsFile=prsFile, skipAutosomeFiltering = skipAutosomeFiltering,
minimum_test_samples= minimum_test_samples, relatedness_score=relatedness_score, snps_filename=snps_filename, feature_filename=feature_filename, snp_feature_filename=snp_feature_filename, selection=genetic_range,
covariates_filename=covariates_filename, kinship_filename=kinship_filename, sample_mapping_filename=sample_mapping_filename, feature_variant_covariate_filename=feature_variant_covariate_filename)
mixed = kinship_df is not None
if (kinship_df is None) or (relatedness_score is None) :
geneticaly_unique_individuals = sample2individual_df['iid'].values
QS = None
    if feature_list is None or len(feature_list) == 0:
print ('No features to be tested.')
sys.exit()
#Open output files
qtl_loader_utils.ensure_dir(output_dir)
if not selectionStart is None :
output_writer = qtl_output.hdf5_writer(output_dir+'/qtl_results_{}_{}_{}.h5'.format(chromosome,selectionStart,selectionEnd))
else :
output_writer = qtl_output.hdf5_writer(output_dir+'/qtl_results_{}.h5'.format(chromosome))
if(write_permutations):
if not selectionStart is None :
permutation_writer = qtl_output.hdf5_permutations_writer(output_dir+'/perm_results_{}_{}_{}.h5'.format(chromosome,selectionStart,selectionEnd),n_perm)
else :
permutation_writer = qtl_output.hdf5_permutations_writer(output_dir+'/perm_results_{}.h5'.format(chromosome),n_perm)
#Arrays to store indices of snps tested and pass and fail QC SNPs for features without missingness.
tested_snp_names = []
fail_qc_features = []
alpha_params = []
beta_params = []
n_samples = []
n_e_samples = []
na_containing_features=0
currentFeatureNumber = 0
snpQcInfoMain = None
for feature_id in feature_list:
snpQcInfo = None
currentFeatureNumber+= 1
if (len(phenotype_df.loc[feature_id,:]))<minimum_test_samples:
print("Feature: "+feature_id+" not tested not enough samples do QTL test.")
fail_qc_features.append(feature_id)
geneticaly_unique_individuals = tmp_unique_individuals
continue
data_written = False
contains_missing_samples = False
snpQuery = risk_df.index.values
snp_cov_df = None
if(feature_variant_covariate_df is not None):
if(feature_id in feature_variant_covariate_df['feature'].values):
covariateSnp = feature_variant_covariate_df['snp_id'].values[feature_variant_covariate_df['feature']==feature_id]
if(any(i in risk_df.index.values for i in covariateSnp)):
snp_cov_df = risk_df.loc[risk_df.index.map(lambda x: x in list(covariateSnp)),:].transpose()
if (len(snpQuery) != 0) and (snp_filter_df is not None):
snpQuery = list(set(snp_filter_df.index).intersection(set(snpQuery)))
if (len(snpQuery) != 0) and (snp_feature_filter_df is not None):
snpQuery = list(set(np.unique(snp_feature_filter_df['snp_id'].loc[snp_feature_filter_df['feature']==feature_id])).intersection(set(snpQuery)))
if len(snpQuery) == 0:
print("Feature: "+feature_id+" not tested. No SNPS passed QC for phenotype.")
fail_qc_features.append(feature_id)
continue
else:
phenotype_ds = phenotype_df.loc[feature_id]
contains_missing_samples = any(~np.isfinite(phenotype_ds))
if(contains_missing_samples):
#import pdb; pdb.set_trace()
print ('Feature: ' + feature_id + ' contains missing data.')
phenotype_ds.dropna(inplace=True)
na_containing_features = na_containing_features+1
'''select indices for relevant individuals in genotype matrix
These are not unique. NOT to be used to access phenotype/covariates data
'''
individual_ids = sample2individual_df.loc[phenotype_ds.index,'iid'].values
sample2individual_feature= sample2individual_df.loc[phenotype_ds.index]
if contains_missing_samples:
tmp_unique_individuals = geneticaly_unique_individuals
if (kinship_df is not None) and (relatedness_score is not None):
geneticaly_unique_individuals = utils.get_unique_genetic_samples(kinship_df.loc[individual_ids,individual_ids], relatedness_score);
else :
geneticaly_unique_individuals = individual_ids
if phenotype_ds.empty or len(geneticaly_unique_individuals)<minimum_test_samples :
print("Feature: "+feature_id+" not tested not enough samples do QTL test.")
fail_qc_features.append(feature_id)
if contains_missing_samples:
geneticaly_unique_individuals = tmp_unique_individuals
continue
elif np.var(phenotype_ds.values) == 0:
print("Feature: "+feature_id+" has no variance in selected individuals.")
fail_qc_features.append(feature_id)
if contains_missing_samples:
geneticaly_unique_individuals = tmp_unique_individuals
continue
print ('For feature: ' +str(currentFeatureNumber)+ '/'+str(len(feature_list))+ ' (' + feature_id + '): ' + str(len(snpQuery)) + ' risk scores will be tested.\n Please stand by.')
if(n_perm!=0):
                bestPermutationPval = np.ones((n_perm), dtype=float)
#Here we need to start preparing the LMM, can use the fam for sample IDS in SNP matrix.
# test if the covariates, kinship, snp and phenotype are in the same order
if ((all(kinship_df.loc[individual_ids,individual_ids].index==sample2individual_feature.loc[phenotype_ds.index]['iid']) if kinship_df is not None else True) &\
(all(phenotype_ds.index==covariate_df.loc[sample2individual_feature['sample'],:].index)if covariate_df is not None else True)):
'''
if all lines are in order put in arrays the correct genotype and phenotype
x=a if cond1 else b <---> equivalent to if cond1: x=a else x=b; better readability of the code
'''
if kinship_df is not None:
kinship_mat = kinship_df.loc[individual_ids,individual_ids].values
kinship_mat = kinship_mat.astype(float)
##GOWER normalization of Kinship matrix.
kinship_mat *= (kinship_mat.shape[0] - 1) / (kinship_mat.trace() - kinship_mat.mean(0).sum())
## This needs to go with the subselection stuff.
if(QS is None and not contains_missing_samples):
QS = economic_qs(kinship_mat)
elif (contains_missing_samples):
QS_tmp = QS
QS = economic_qs(kinship_mat)
if kinship_df is None:
K = np.eye(len(phenotype_ds.index))
if(QS is None and not contains_missing_samples):
QS = economic_qs(K)
elif (contains_missing_samples):
QS_tmp = QS
QS = economic_qs(K)
cov_matrix = covariate_df.loc[sample2individual_feature['sample'],:].values if covariate_df is not None else None
if covariate_df is None:
cov_matrix = np.ones((len(individual_ids), 1))
#pdb.set_trace()
if snp_cov_df is not None:
snp_cov_df_tmp = snp_cov_df.loc[individual_ids,:]
snp_cov_df = pd.DataFrame(fill_NaN.fit_transform(snp_cov_df_tmp))
snp_cov_df.index=sample2individual_feature['sample']
snp_cov_df.columns=snp_cov_df_tmp.columns
cov_matrix = np.concatenate((cov_matrix,snp_cov_df.values),1)
snp_cov_df_tmp = None
snp_cov_df = None
cov_matrix = cov_matrix.astype(float)
else:
print ('There is an issue in mapping phenotypes vs covariates and/or kinship')
sys.exit()
phenotype = utils.force_normal_distribution(phenotype_ds.values,method=gaussianize_method) if gaussianize_method is not None else phenotype_ds.values
#Prepare LMM
phenotype = phenotype.astype(float)
##Mixed and test.
##This is a future change so we don't need to decompose the COVs every time.
##Like QS this needs to happen when genetic unique individuals is the same.
#svd_cov = economic_svd(cov_matrix)
#lmm = LMM(phenotype, cov_matrix, QS, SVD=svd_cov)
#These steps need to happen only once per phenotype.
#print(QS)
lmm = LMM(phenotype, cov_matrix, QS)
if not mixed:
lmm.delta = 1
lmm.fix('delta')
#Prepare null model.
lmm.fit(verbose=False)
if regressCovariatesUpfront:
phenotype_corrected = phenotype-cov_matrix[:,1:].dot(lmm.beta[1:])
cov_matrix_corrected = cov_matrix[:,0]
lmm = LMM(phenotype_corrected, cov_matrix_corrected, QS)
lmm.fit(verbose=False)
null_lml = lmm.lml()
flmm = lmm.get_fast_scanner()
#pdb.set_trace();
for snpGroup in utils.chunker(snpQuery, blocksize):
#Fix seed at the start of the first chunker so all permutations are based on the same random first split.
np.random.seed(seed)
snp_names = snpGroup
tested_snp_names.extend(snp_names)
snp_matrix_DF = risk_df.loc[snp_names,individual_ids].transpose()
##GRS var QC
snp_matrix_DF = snp_matrix_DF.loc[:,snp_matrix_DF.isna().sum(axis=0)!=snp_matrix_DF.shape[0],]
snp_matrix_DF = snp_matrix_DF.loc[:,(np.nanstd(snp_matrix_DF,axis=0)>0)]
# test if the covariates, kinship, snp and phenotype are in the same order
if (len(snp_matrix_DF.index) != len(sample2individual_feature.loc[phenotype_ds.index]['iid']) or not all(snp_matrix_DF.index==sample2individual_feature.loc[phenotype_ds.index]['iid'])):
print ('There is an issue in mapping phenotypes and genotypes')
sys.exit()
#Impute missingness
#pdb.set_trace()
call_rate = 1-snp_matrix_DF.isnull().sum()/len(snp_matrix_DF.index)
if snpQcInfo is None and call_rate is not None:
snpQcInfo = call_rate
elif call_rate is not None:
snpQcInfo = pd.concat([snpQcInfo, call_rate], axis=0)
selection = call_rate > min_call_rate
snp_matrix_DF = snp_matrix_DF.loc[:,list(snp_matrix_DF.columns[selection])]
if snp_matrix_DF.shape[1]==0:
continue
snp_matrix_DF = pd.DataFrame(fill_NaN.fit_transform(snp_matrix_DF),index=snp_matrix_DF.index,columns=snp_matrix_DF.columns)
#
G = snp_matrix_DF.values
G = G.astype(float)
G_index = snp_matrix_DF.columns
alt_lmls, effsizes = flmm.fast_scan(G, verbose=False)
var_pvalues = lrt_pvalues(null_lml, alt_lmls)
var_effsizes_se = effsizes_se(effsizes, var_pvalues)
#add these results to qtl_results
temp_df = pd.DataFrame(index = range(len(G_index)),columns=['feature_id','snp_id','p_value','beta','beta_se','empirical_feature_p_value'])
temp_df['snp_id'] = G_index
temp_df['feature_id'] = feature_id
temp_df['beta'] = np.asarray(effsizes)
temp_df['p_value'] = np.asarray(var_pvalues)
temp_df['beta_se'] = np.asarray(var_effsizes_se)
#insert default dummy value
temp_df['empirical_feature_p_value'] = -1.0
if(n_perm!=0):
pValueBuffer = []
totalSnpsToBeTested = (G.shape[1]*n_perm)
permutationStepSize = np.floor(n_perm/(totalSnpsToBeTested/blocksize))
if(permutationStepSize>n_perm):
permutationStepSize=n_perm
elif(permutationStepSize<1):
permutationStepSize=1
if(write_permutations):
perm_df = pd.DataFrame(index = range(len(G_index)),columns=['snp_id'] + ['permutation_'+str(x) for x in range(n_perm)])
perm_df['snp_id'] = G_index
for currentNperm in utils.chunker(list(range(1, n_perm+1)), permutationStepSize):
if (kinship_df is not None) and (relatedness_score is not None):
temp = utils.get_shuffeld_genotypes_preserving_kinship(geneticaly_unique_individuals, relatedness_score, snp_matrix_DF,kinship_df.loc[individual_ids,individual_ids], len(currentNperm))
else :
temp = utils.get_shuffeld_genotypes(snp_matrix_DF, len(currentNperm))
temp = temp.astype(float)
alt_lmls_p, effsizes_p = flmm.fast_scan(temp, verbose=False)
var_pvalues_p = lrt_pvalues(null_lml, alt_lmls_p)
pValueBuffer.extend(np.asarray(var_pvalues_p))
if(not(len(pValueBuffer)==totalSnpsToBeTested)):
#print(len(pValueBuffer))
#print(pValueBuffer)
#print(totalSnpsToBeTested)
print('Error in blocking logic for permutations.')
sys.exit()
perm = 0
for relevantOutput in utils.chunker(pValueBuffer,G.shape[1]) :
if(write_permutations):
perm_df['permutation_'+str(perm)] = relevantOutput
if(bestPermutationPval[perm] > min(relevantOutput)):
bestPermutationPval[perm] = min(relevantOutput)
perm = perm+1
#print(relevantOutput)
#print('permutation_'+str(perm))
if not temp_df.empty :
data_written = True
output_writer.add_result_df(temp_df)
if(write_permutations):
permutation_writer.add_permutation_results_df(perm_df,feature_id)
#This we need to change in the written file.
if(n_perm>1 and data_written):
#updated_permuted_p_in_hdf5(bestPermutationPval, feature_id);
alpha_para, beta_para = output_writer.apply_pval_correction(feature_id,bestPermutationPval,False)
alpha_params.append(alpha_para)
beta_params.append(beta_para)
#pdb.set_trace();
if not data_written :
fail_qc_features.append(feature_id)
else:
n_samples.append(phenotype_ds.size)
n_e_samples.append(len(geneticaly_unique_individuals))
if contains_missing_samples:
QS = QS_tmp
geneticaly_unique_individuals = tmp_unique_individuals
snpQcInfo = snpQcInfo.to_frame(name="call_rate")
snpQcInfo.index.name = "snp_id"
snpQcInfo.to_csv(output_dir+'/snp_qc_metrics_naContaining_feature_{}.txt'.format(feature_id),sep='\t')
del QS_tmp
del tmp_unique_individuals
else:
if (snpQcInfo is not None and snpQcInfoMain is not None):
snpQcInfoMain = pd.concat([snpQcInfoMain, snpQcInfo], axis=0)
elif snpQcInfo is not None :
snpQcInfoMain = snpQcInfo.copy(deep=True)
#print('step 5')
output_writer.close()
if(write_permutations):
permutation_writer.close()
fail_qc_features = np.unique(fail_qc_features)
if((len(feature_list)-len(fail_qc_features))==0):
time.sleep(15)
#Safety timer to make sure the file is unlocked.
print("Trying to remove the h5 file. Nothing has been tested.")
        print(output_dir+'/qtl_results_{}_{}_{}.h5'.format(chromosome,selectionStart,selectionEnd))
        if not selectionStart is None :
            os.remove(output_dir+'/qtl_results_{}_{}_{}.h5'.format(chromosome,selectionStart,selectionEnd))
        else :
            os.remove(output_dir+'/qtl_results_{}.h5'.format(chromosome))
sys.exit()
#gather unique indexes of tested snps
#write annotation and snp data to file
snp_df = | pd.DataFrame() | pandas.DataFrame |
from collections import defaultdict
import json
import re
import sys
import time
import matplotlib.pyplot as plt
from itertools import permutations
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import pdist
from scipy.stats import lognorm
import seaborn as sns
from sklearn.cluster import DBSCAN
import statsmodels.nonparametric.api as smnp
#############################################################################
### Parameters
### Theoretical scale markers
### PYT = Pythagorean tuning
### EQ{N} = N-Tone Equal Temperament
### JI = Just intonation
### CHINA = Shi-er-lu
### The rest are sourced from Rechberger, Herman
PYT_INTS = np.array([0., 90.2, 203.9, 294.1, 407.8, 498.1, 611.7, 702., 792.2, 905., 996.1, 1109.8, 1200.])
EQ5_INTS = np.linspace(0, 1200, num=6, endpoint=True, dtype=float)
EQ7_INTS = np.linspace(0, 1200, num=8, endpoint=True, dtype=float)
EQ9_INTS = np.linspace(0, 1200, num=10, endpoint=True, dtype=float)
EQ10_INTS = np.linspace(0, 1200, num=11, endpoint=True, dtype=float)
EQ12_INTS = np.linspace(0, 1200, num=13, endpoint=True, dtype=float)
EQ24_INTS = np.linspace(0, 1200, num=25, endpoint=True, dtype=float)
EQ53_INTS = np.linspace(0, 1200, num=54, endpoint=True, dtype=float)
JI_INTS = np.array([0., 111.7, 203.9, 315.6, 386.3, 498.1, 590.2, 702., 813.7, 884.4, 1017.6, 1088.3, 1200.])
SLENDRO = np.array([263., 223., 253., 236., 225.])
PELOG = np.array([167., 245., 125., 146., 252., 165., 100.])
DASTGAH = np.array([0., 90., 133.23, 204., 294.14, 337.14, 407.82, 498., 568.72, 631.28, 702., 792.18, 835.2, 906., 996., 1039.1, 1109.77, 1200.])
TURKISH = {'T':203.8, 'K':181.1, 'S':113.2, 'B':90.6, 'F':22.6, 'A':271, 'E':67.9}
KHMER_1 = np.array([185., 195., 105., 195., 195., 185., 140.])
KHMER_2 = np.array([190., 190., 130., 190., 190., 190., 120.])
VIET = np.array([0., 175., 200., 300., 338., 375., 500., 520., 700., 869., 900., 1000., 1020., 1200.])
CHINA = np.array([0., 113.67291609, 203.91000173, 317.73848174, 407.83554758, 520.68758457, 611.71791523, 701.95500087, 815.62791696, 905.8650026 , 1019.47514332, 1109.76982292, 1201.27828039])
### Maximum allowable deviation from a perfect octave
### i.e., scale is included if the intervals sum to 1200 +- OCT_CUT
OCT_CUT = 50
#############################################################################
### Functions to be used in reformatting the data
def get_cents_from_ratio(ratio):
return 1200.*np.log10(ratio)/np.log10(2)
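# A minimal sanity check for the cents conversion above (added illustration):
# one octave (frequency ratio 2/1) is 1200 cents, and the just fifth (3/2) is
# ~701.955 cents, i.e. the 702.0 entry (rounded) in JI_INTS.
def _demo_cents_from_ratio():
    assert abs(get_cents_from_ratio(2) - 1200.) < 1e-9
    assert abs(get_cents_from_ratio(3 / 2) - 701.955) < 1e-2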
def str_to_ints(st, delim=';'):
return [int(s) for s in st.split(delim) if len(s)]
def ints_to_str(i):
return ';'.join([str(x) for x in i])
def get_all_ints(df, old='pair_ints', new='all_ints2'):
def fn(pi):
ints = np.array(str_to_ints(pi))
return ints_to_str([x for i in range(len(ints)) for x in np.cumsum(np.roll(ints,i))[:-1]])
df[new] = df[old].apply(fn)
return df
#############################################################################
### Clusting the scales by the distance between interval sets
def find_min_pair_int_dist(b, c):
dist = 0.0
for i in range(len(b)):
dist += np.min(np.abs(c-b[i]))
return dist
def pair_int_distance(pair_ints):
pair_dist = np.zeros((len(pair_ints), len(pair_ints)), dtype=float)
for i in range(len(pair_ints)):
for j in range(len(pair_ints)):
dist1 = find_min_pair_int_dist(pair_ints[i], pair_ints[j])
dist2 = find_min_pair_int_dist(pair_ints[j], pair_ints[i])
pair_dist[i,j] = (dist1 + dist2) * 0.5
return pair_dist
def cluster_pair_ints(df, n_clusters):
pair_ints = np.array([np.array([float(x) for x in y.split(';')]) for y in df.pair_ints])
pair_dist = pair_int_distance(pair_ints)
li = linkage(pdist(pair_dist), 'ward')
return fcluster(li, li[-n_clusters,2], criterion='distance')
def label_scales_by_cluster(df, n=16):
nc = cluster_pair_ints(df, n)
df[f"cl_{n:02d}"] = nc
return df
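# Usage sketch for the clustering helpers above (added illustration; `df` is
# assumed to be a scale DataFrame with a 'pair_ints' column of ';'-separated
# adjacent intervals, as produced by the loading code in this module).
def _demo_cluster_labels(df, n=16):
    df = label_scales_by_cluster(df, n=n)
    return df[f"cl_{n:02d}"].value_counts()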
#############################################################################
### Functions for extracting and reformatting the raw data
### Encode a scale as a binary string:
### If the first character is 0, then the first potential note in the scale is
### not played. If it is 1, then it is played.
### E.g. The major scale in 12-TET is given by 010110101011
### The intervals are then retrieved by comparing the mask with the correct tuning system
def reformat_scales_as_mask(df):
df['Intervals'] = df['Intervals'].astype(str)
st = '000000000000001'
fn = lambda x: '1' + ''.join([st[-int(i):] for i in x])
idx = df.loc[df.Tuning.apply(lambda x: x not in ['Unique', 'Turkish', '53-tet'])].index
df.loc[idx, 'mask'] = df.loc[idx, 'Intervals'].apply(fn)
fn = lambda x: '1' + ''.join([st[-int(i):] for i in x.split(';')])
idx = df.loc[df.Tuning=='53-tet'].index
df.loc[idx, 'mask'] = df.loc[idx, 'Intervals'].apply(fn)
return df
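# Added illustration of reading a mask against a tuning system (the same
# indexing that scale_matching_fn below applies to row.mask): a '1' at
# position i switches on the i-th degree of the chosen tuning, so this
# 12-TET mask picks out 0, 200, 400, 500, 700, 900 and 1100 cents.
def _demo_mask_to_degrees(mask='101011010101'):
    idx = np.where([int(c) for c in mask])[0]
    return EQ12_INTS[idx]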
def reformat_surjodiningrat(df):
for row in df.itertuples():
ints = [get_cents_from_ratio(float(row[i+3])/float(row[i+2])) for i in range(7) if row[i+3] != 0]
df.loc[row[0], 'pair_ints'] = ';'.join([str(int(round(x))) for x in ints])
df['Reference'] = 'Surjodiningrat'
df['Theory'] = 'N'
df = df.drop(columns=[str(x) for x in range(1,9)])
return df
def reformat_original_csv_data(df):
new_df = pd.DataFrame(columns=['Name', 'Intervals', 'Culture', 'Region', 'Country', 'Tuning', 'Reference', 'RefID', 'Theory'])
for i, col in enumerate(df.columns):
tuning = df.loc[0, col]
culture = df.loc[1, col]
cont = df.loc[2, col]
country = df.loc[3, col]
ref = df.loc[4, col]
refid = df.loc[5, col]
theory = df.loc[6, col]
try:
int(col)
name = '_'.join([culture, col])
except:
name = col
ints = ';'.join([str(int(round(float(x)))) for x in df.loc[7:, col] if not str(x)=='nan'])
new_df.loc[i] = [name, ints, culture, cont, country, tuning, ref, refid, theory]
return new_df
def update_scale_data(data_dict, scale, name, country, culture, tuning, cont, ref, refID, theory):
data_dict['Name'].append(name)
data_dict['scale'].append(scale)
data_dict['all_ints'].append([scale[i] - scale[j] for j in range(len(scale)) for i in range(j+1,len(scale))])
data_dict['pair_ints'].append([scale[j+1] - scale[j] for j in range(len(scale)-1)])
data_dict['Tuning'].append(tuning)
data_dict['Country'].append(country)
data_dict['Culture'].append(culture)
data_dict['Region'].append(cont)
data_dict['Reference'].append(ref)
data_dict['RefID'].append(refID)
data_dict['Theory'].append(theory)
return data_dict
def scale_matching_fn(row):
# Only some tuning systems use 'mask'
try:
idx = np.where(np.array([int(x) for x in row.mask]))[0]
except TypeError:
pass
for tun in row.Tuning.split(';'):
if tun == '12-tet':
yield EQ12_INTS[idx]
elif tun == '53-tet':
yield EQ53_INTS[idx]
elif tun == 'Just':
yield JI_INTS[idx]
elif tun == 'Pythagorean':
yield PYT_INTS[idx]
elif tun == 'Arabic':
yield EQ24_INTS[idx]
elif tun == 'Dastgah-ha':
yield DASTGAH[idx]
elif tun == 'Vietnamese':
yield VIET[idx]
elif tun == 'Chinese':
yield CHINA[idx]
elif tun == 'Turkish':
yield np.cumsum([0.0] + [TURKISH[a] for a in row.Intervals])
elif tun == 'Khmer':
for KHM in [KHMER_1, KHMER_2]:
base = KHM[[i-1 for i in idx[1:]]]
for i in range(len(base)):
                    yield np.cumsum([0.] + list(np.roll(KHM, i)))
def process_scale(scale):
scale = scale.astype(int)
adj_ints = np.diff(scale).astype(int)
N = len(adj_ints)
all_ints1 = np.array([i for j in range(len(scale)-1) for i in np.cumsum(adj_ints[j:])])
all_ints2 = np.array([i for j in range(len(scale)) for i in np.cumsum(np.roll(adj_ints, j))])
return adj_ints, N, scale, all_ints1, all_ints2
def match_scales_to_tunings(df):
df = reformat_scales_as_mask(df.copy())
cols = list(df.columns[:-1])
cols[2:2] = ['n_notes', 'scale', 'all_ints1', 'all_ints2']
new_df = pd.DataFrame(columns=cols)
for row in df.itertuples():
for scale in scale_matching_fn(row):
adj_ints, N, scale, all_ints1, all_ints2 = process_scale(scale)
vals = list(row)[1:-1]
vals[1] = adj_ints
vals[2:2] = [N, scale, all_ints1, all_ints2]
new_df.loc[len(new_df)] = vals
return new_df
def extract_scale_using_tonic(ints, tonic, oct_cut):
# If in str or list format, there are explicit instructions
# for each interval
# Otherwise, there is simply a starting note, and it should
# not go beyond a single octave
if isinstance(tonic, str):
tonic = np.array(str_to_ints(tonic))
tmin, tmax = min(tonic), max(tonic)
elif isinstance(tonic, (list, np.ndarray)):
tmin, tmax = min(tonic), max(tonic)
elif isinstance(tonic, (int, float)):
i_tonic = int(tonic) - 1
tonic = np.zeros(len(ints)+1)
tonic[i_tonic] = 1
tonic[-1] = 2
tmin, tmax = 1, 2
scale = []
for i, t1, t2 in zip(ints, tonic[:-1], tonic[1:]):
if t1 == tmin:
if len(scale):
yield np.array(scale)
scale = [0, i]
elif len(scale):
scale.append(i + scale[-1])
    if scale[-1] > (1200 - oct_cut):
yield np.array(scale)
def extract_specific_modes(ints, tonic, modes):
if isinstance(tonic, str):
tonic = np.array(str_to_ints(tonic), int)
for m in modes.split(','):
m = str_to_ints(m)
extra = 0
scale = []
for i, t in zip(ints, tonic[:-1]):
if t == m[0]:
if len(scale):
if scale[-1] > (1200 - OCT_CUT):
yield np.array(scale)
scale = [0, i]
elif len(scale) and t in m:
scale.append(scale[-1] + i)
elif len(scale):
scale[-1] = scale[-1] + i
if scale[-1] > (1200 - OCT_CUT):
yield np.array(scale)
def eval_tonic(tonic):
if isinstance(tonic, str):
return tonic != 'N/A'
elif isinstance(tonic, (int, float)):
return not np.isnan(tonic)
def extract_scale(row, oct_cut=OCT_CUT, use_mode=False):
ints = np.array(row.Intervals)
# This column exists only for this instruction;
# If 'Y', then add the final interval needed for the scale
# to add up to an octave;
# See paper and excel file for more details
if row.Octave_modified == 'Y':
final_int = 1200 - sum(ints)
yield np.array([0.] + list(np.cumsum(list(ints) + [final_int])))
return
# Point of confusion here... clear it up
if not use_mode:
try:
for scale in extract_specific_modes(ints, row.Tonic, row.Modes):
yield scale
return
except AttributeError:
pass
# If the entry includes information on tonality, and if
# not using modes, follow the instructions given
if not use_mode:
if eval_tonic(row.Tonic):
for scale in extract_scale_using_tonic(ints, row.Tonic, oct_cut):
if abs(1200 - scale[-1]) <= oct_cut:
yield scale
return
if sum(ints) >= (1200 - oct_cut):
start_from = 0
for i in range(len(ints)):
if i < start_from:
continue
sum_ints = np.cumsum(ints[i:], dtype=int)
# If the total sum of ints is less than the cutoff, ignore this entry
            if sum_ints[-1] < (1200 - oct_cut):
                break
# Find the scale degree by finding the note closest to 1200
idx_oct = np.argmin(np.abs(sum_ints-1200))
oct_val = sum_ints[idx_oct]
# If the total sum of ints is greater than the cutoff, move
# on to the next potential scale
            if abs(oct_val - 1200) > oct_cut:
continue
# If modes are not being used (i.e., if each interval is only
# allowed to be counted in a scale once) then start looking
# for new scales from this index
if not use_mode:
start_from = idx_oct + i + 1
yield np.array([0.] + list(sum_ints[:idx_oct+1]))
def extract_scales_from_measurements(df, oct_cut=OCT_CUT, use_mode=False):
if isinstance(df.loc[0, 'Intervals'], str):
df.Intervals = df.Intervals.apply(str_to_ints)
cols = list(df.columns)
cols[2:2] = ['n_notes', 'scale', 'all_ints1', 'all_ints2']
new_df = | pd.DataFrame(columns=cols) | pandas.DataFrame |
"""
Name : c9_14_get_stock_return_matrix_from_yanMonthly.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import numpy as np
import scipy as sp
import pandas as pd
#
n_stocks=10
x=pd.read_pickle('c:/temp/yanMonthly.pkl')
x2=np.unique(np.array(x.index))
x3=x2[x2<'ZZZZ'] # remove all indices
np.random.seed(1234567)
nonStocks=['GOLDPRICE','HML','SMB','Mkt_Rf','Rf','Russ3000E_D','US_DEBT','Russ3000E_X','US_GDP2009dollar','US_GDP2013dollar']
x4=list(x3)
for i in range(len(nonStocks)):
x4.remove(nonStocks[i])
k=np.random.uniform(low=1,high=len(x4),size=n_stocks)
y,s=[],[]
for i in range(n_stocks):
index=int(k[i])
y.append(index)
s.append(x4[index])
final=np.unique(y)
print(s)
def ret_f(ticker):
a=x[x.index==ticker]
    p=np.array(a['VALUE'])
ddate=a['DATE']
ret=p[1:]/p[:-1]-1
output= | pd.DataFrame(ret,index=ddate[1:]) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
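    @staticmethod
    def _example_mean_via_weights():
        """Added illustration (not called by the pipeline): averaging with uniform
        weights of 1/n, as _calculateMean does above, reproduces the ordinary
        arithmetic mean while sidestepping an explicit division by the count."""
        x = numpy.array([1.0, 2.0, 3.0, 4.0])
        return numpy.average(x, weights=numpy.ones_like(x) / x.size)  # -> 2.5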
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomoly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomoly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": | pandas.StringDtype() | pandas.StringDtype |
import os
import sys
import argparse
import shlex
from pprint import pprint
from copy import deepcopy
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.grid_search import ParameterGrid
from clusterlib.scheduler import queued_or_running_jobs
from clusterlib.scheduler import submit
from clusterlib.storage import sqlite3_loads
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from main import WORKING_DIR
from main import make_hash
from main import parse_arguments
from main import get_sqlite3_path
from utils import scale
LOG_DIRECTORY = os.path.join(WORKING_DIR, "logs")
ALL_FLUORESCENCE = [os.path.join(WORKING_DIR, "datasets", x)
for x in os.listdir(os.path.join(WORKING_DIR,
"datasets"))
if (x.startswith("fluorescence_"))]
ALL_NETWORKS = [os.path.basename(os.path.splitext(x)[0]).split("_", 1)[1]
for x in ALL_FLUORESCENCE]
OUTPUT_DIR = os.path.join(WORKING_DIR, "submission")
NORMAL = [{
"output_dir": [OUTPUT_DIR],
"network": [network],
"fluorescence": [fluorescence],
"method": ["simple", "tuned"],
"directivity": [0, 1],
} for fluorescence, network in zip(ALL_FLUORESCENCE, ALL_NETWORKS)]
HIDDEN_NEURON = [{
"output_dir": [OUTPUT_DIR],
"network": [network],
"fluorescence": [fluorescence],
"method": ["simple", "tuned"],
"directivity": [0, 1],
"killing": range(1, 11),
}
for fluorescence, network in zip(ALL_FLUORESCENCE, ALL_NETWORKS)
if network in ("normal-3", "normal-4")
]
PARAMETER_GRID = ParameterGrid(NORMAL + HIDDEN_NEURON)
TIME = dict()
MEMORY = dict()
def _roc_auc_score(y_true, y_score):
try:
return roc_auc_score(y_true, y_score)
except ValueError:
return np.nan
METRICS = {
"roc_auc_score": _roc_auc_score,
"average_precision_score": average_precision_score,
}
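# Added illustration: roc_auc_score raises ValueError when y_true contains only
# one class (e.g. a network slice with no positive edges), which is why the
# _roc_auc_score wrapper above falls back to NaN instead of aborting a scoring run.
def _demo_single_class_auc():
    return _roc_auc_score(np.zeros(4), np.array([0.1, 0.2, 0.3, 0.4]))  # -> nan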
def compute_scores(f_ground_truth, f_prediction, parameters):
# Load ground truth
raw_graph = np.loadtxt(f_ground_truth, delimiter=",")
row = raw_graph[:, 0] - 1
col = raw_graph[:, 1] - 1
data = raw_graph[:, 2]
valid_index = data > 0
y_true = coo_matrix((data[valid_index],
(row[valid_index], col[valid_index])),
shape=(1000, 1000))
y_true = y_true.toarray()
if parameters.get("killing", None):
# load name_kill_var
killing_file = os.path.join(WORKING_DIR, "datasets", "hidden-neurons",
"{0}_kill_{1}.txt"
"".format(parameters["network"],
parameters["killing"]))
kill = np.loadtxt(killing_file, dtype=np.int)
# make a mask
alive = np.ones((y_true.shape[0],), dtype=bool)
alive[kill - 1] = False # we need to make -1 since it's matlab indexing
y_true = y_true[alive][:, alive]
# Load predictions
rows = []
cols = []
scores = []
with open(f_prediction) as fhandle:
fhandle.next()
for line in fhandle:
line = line.strip()
prefix, score = line.rsplit(",", 1)
scores.append(float(score))
row, col = prefix.split("_")[-2:]
rows.append(int(row) - 1)
cols.append(int(col) - 1)
y_scores = scale(coo_matrix((scores, (rows, cols))).toarray())
print(y_true.shape)
print(y_scores.shape)
# Compute scores
measures = dict((name, metric(y_true.ravel(), y_scores.ravel()))
for name, metric in METRICS.items())
return measures
if __name__ == "__main__":
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', default=False, action="store_true")
parser.add_argument('-v', '--verbose', default=False, action="store_true")
parser.add_argument('-s', '--scores', default=False, action="store_true",
help="compute scores")
args = vars(parser.parse_args())
# Create log direcotyr if needed
if not os.path.exists(LOG_DIRECTORY):
os.makedirs(LOG_DIRECTORY)
# Get running jobs
all_jobs_running = set(queued_or_running_jobs())
all_jobs_done = sqlite3_loads(get_sqlite3_path())
# Intialize some counter for reporting
n_jobs_running = 0
n_jobs_done = 0
n_jobs_launched = 0
results = []
# Launch if necessary experiments
for parameters in PARAMETER_GRID:
job_hash = make_hash(parameters)
if job_hash in all_jobs_running:
n_jobs_running +=1
elif job_hash in all_jobs_done:
n_jobs_done += 1
if args["scores"]:
fname = os.path.join(OUTPUT_DIR, "%s.csv" % job_hash)
network = parameters["network"]
if "normal-" in parameters["network"]:
network = parameters["network"][:len("normal-") + 1]
elif "test" in parameters["network"]:
continue
elif "valid" in parameters['network']:
continue
else:
raise ValueError("Unknown network")
ground_truth = os.path.join(WORKING_DIR, "datasets",
"network_%s.txt" % network)
measure = compute_scores(ground_truth, fname, parameters)
row = deepcopy(parameters)
row.update(measure)
pprint(row)
results.append(row)
else:
n_jobs_launched += 1
cmd_parameters = " ".join("--%s %s" % (key, parameters[key])
for key in sorted(parameters))
scripts_args = parse_arguments(shlex.split(cmd_parameters))
if make_hash(scripts_args) != job_hash:
pprint(scripts_args)
pprint(parameters)
raise ValueError("hash are not equal, all parameters are "
"not specified.")
cmd = submit(job_command="%s main.py %s"
% (sys.executable, cmd_parameters),
job_name=job_hash,
time="100:00:00",
memory=24000,
log_directory=LOG_DIRECTORY,
backend="slurm")
if not args["debug"]:
os.system(cmd)
elif args["verbose"]:
print("[launched] %s" % job_hash)
print(cmd)
if os.path.exists(os.path.join(LOG_DIRECTORY,
"%s.txt" % job_hash)):
os.system("cat %s" % os.path.join(LOG_DIRECTORY,
"%s.txt" % job_hash))
print("\nSummary launched")
print("------------------")
print("n_jobs_runnings = %s" % n_jobs_running)
print("n_jobs_done = %s" % n_jobs_done)
print("n_jobs_launched = %s" % n_jobs_launched)
if args["scores"]:
import pandas as pd
results = | pd.DataFrame(results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Master_NBA_Predictive_Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16mdsw4rUN3jcKETlA4rHSXlp1Hjr4raK
"""
from argparse import ArgumentParser
import pandas as pd
import random as rnd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
MULTIPLE_SEASON = pd.read_excel('data/output_multiple_season.xlsx')
def calculate(home_team, away_team):
pd.options.display.float_format = '{:.2f}'.format
homeCourtAdvantage = rnd.randint(3, 5)
url = "https://www.basketball-reference.com/teams/{}/2020_games.html".format(
home_team)
dfs = pd.read_html(url)
home_team_df = pd.concat(dfs)
home_team_df = home_team_df[pd.notnull(home_team_df['Tm'])]
home_team_df.columns = ['Game', 'Date', 'Time', 'Drop', 'Drop2', 'Drop3',
'Opponent', 'Drop4', 'Drop5', 'Team_Points',
'Opponent_Points', 'W', 'L', 'Streak', 'Notes']
home_team_df = home_team_df.drop(
columns=['Game', 'Date', 'Time', 'Drop', 'Drop2', 'Drop3', 'Drop4',
'Drop5', 'W', 'L', 'Streak', 'Notes'])
home_team_df = home_team_df[home_team_df.Team_Points != 'Tm']
home_team_df["Team_Points"] = pd.to_numeric(home_team_df["Team_Points"])
home_team_df["Opponent_Points"] = pd.to_numeric(
home_team_df["Opponent_Points"])
url2 = "https://www.basketball-reference.com/teams/{}/2020_games.html".format(
away_team)
dfs2 = pd.read_html(url2)
away_team_df = pd.concat(dfs2)
away_team_df = away_team_df[pd.notnull(away_team_df['Tm'])]
away_team_df.columns = ['Game', 'Date', 'Time', 'Drop', 'Drop2', 'Drop3',
'Opponent', 'Drop4', 'Drop5', 'Team_Points',
'Opponent_Points', 'W', 'L', 'Streak', 'Notes']
away_team_df = away_team_df.drop(
columns=['Game', 'Date', 'Time', 'Drop', 'Drop2', 'Drop3', 'Drop4',
'Drop5', 'W', 'L', 'Streak', 'Notes'])
away_team_df = away_team_df[away_team_df.Team_Points != 'Tm']
away_team_df["Team_Points"] = pd.to_numeric(away_team_df["Team_Points"])
away_team_df["Opponent_Points"] = pd.to_numeric(
away_team_df["Opponent_Points"])
homeTeamPoints = home_team_df.Team_Points.mean()
homeTeamPointsDev = home_team_df.Team_Points.std()
opphomeTeamPoints = home_team_df.Opponent_Points.mean()
opphomeTeamPointsDev = home_team_df.Opponent_Points.std()
awayTeamPoints = away_team_df.Team_Points.mean()
awayTeamPointsDev = away_team_df.Team_Points.std()
oppawayTeamPoints = away_team_df.Opponent_Points.mean()
oppawayTeamPointsDev = away_team_df.Opponent_Points.std()
def gameSim():
homeTeamScore = (((rnd.gauss(homeTeamPoints,
homeTeamPointsDev)) + rnd.gauss(
opphomeTeamPoints, opphomeTeamPointsDev)) / 2 + homeCourtAdvantage)
awayTeamScore = ((rnd.gauss(awayTeamPoints,
awayTeamPointsDev)) + rnd.gauss(
oppawayTeamPoints, oppawayTeamPointsDev)) / 2
if int(round(homeTeamScore)) > int(round(awayTeamScore)):
return 1, homeTeamScore, awayTeamScore
elif int(round(homeTeamScore)) < int(round(awayTeamScore)):
return -1, homeTeamScore, awayTeamScore
else:
return 0, homeTeamScore, awayTeamScore
def gamesSim(ns):
gamesout = []
team1win = 0
team2win = 0
tie = 0
global avgHomeTeamScore
global avgAwayTeamScore
avgHomeTeamScore = 0
avgAwayTeamScore = 0
        for i in range(ns):
            gm, homeTeamScore, awayTeamScore = gameSim()
            avgHomeTeamScore = (homeTeamScore + avgHomeTeamScore)
            avgAwayTeamScore = (awayTeamScore + avgAwayTeamScore)
            gamesout.append(gm)
            if gm == 1:
                team1win += 1
            elif gm == -1:
                team2win += 1
            else:
                tie += 1
        # Return the accumulated score totals only after all ns simulated games have run.
        return avgHomeTeamScore, avgAwayTeamScore
gamesSim(100000)
print('progress', 10)
url3 = "http://www.espn.com/nba/hollinger/teamstats"
df_list = pd.read_html(url3)
addteamData = df_list[0]
addteamData = pd.DataFrame(addteamData)
addteamData = addteamData[[1, 10, 11]]
addteamData.columns = ['TEAM', 'Offensive_Effeciency',
'Defensive_Effeciency']
addteamData.loc[addteamData.TEAM == 'Atlanta', 'TEAM'] = "ATL"
addteamData.loc[addteamData.TEAM == 'Boston', 'TEAM'] = "BOS"
addteamData.loc[addteamData.TEAM == 'Brooklyn', 'TEAM'] = "BRK"
addteamData.loc[addteamData.TEAM == 'Charlotte', 'TEAM'] = "CHA"
addteamData.loc[addteamData.TEAM == 'Chicago', 'TEAM'] = "CHI"
addteamData.loc[addteamData.TEAM == 'Cleveland', 'TEAM'] = "CLE"
addteamData.loc[addteamData.TEAM == 'Dallas', 'TEAM'] = "DAL"
addteamData.loc[addteamData.TEAM == 'Denver', 'TEAM'] = "DEN"
addteamData.loc[addteamData.TEAM == 'Detroit', 'TEAM'] = "DET"
addteamData.loc[addteamData.TEAM == 'Golden State', 'TEAM'] = "GSW"
addteamData.loc[addteamData.TEAM == 'Houston', 'TEAM'] = "HOU"
addteamData.loc[addteamData.TEAM == 'Indiana', 'TEAM'] = "IND"
addteamData.loc[addteamData.TEAM == 'LA Clippers', 'TEAM'] = "LAC"
addteamData.loc[addteamData.TEAM == 'Memphis', 'TEAM'] = "MEM"
addteamData.loc[addteamData.TEAM == 'Miami', 'TEAM'] = "MIA"
addteamData.loc[addteamData.TEAM == 'Milwaukee', 'TEAM'] = "MIL"
addteamData.loc[addteamData.TEAM == 'Minnesota', 'TEAM'] = "MIN"
addteamData.loc[addteamData.TEAM == 'New Orleans', 'TEAM'] = "NOP"
addteamData.loc[addteamData.TEAM == 'New York', 'TEAM'] = "NYK"
addteamData.loc[addteamData.TEAM == 'Oklahoma City', 'TEAM'] = "OKC"
addteamData.loc[addteamData.TEAM == 'Orlando', 'TEAM'] = "ORL"
addteamData.loc[addteamData.TEAM == 'Philadelphia', 'TEAM'] = "PHI"
addteamData.loc[addteamData.TEAM == 'Phoenix', 'TEAM'] = "PHO"
addteamData.loc[addteamData.TEAM == 'Portland', 'TEAM'] = "POR"
addteamData.loc[addteamData.TEAM == 'Sacramento', 'TEAM'] = "SAC"
addteamData.loc[addteamData.TEAM == 'San Antonio', 'TEAM'] = "SAS"
addteamData.loc[addteamData.TEAM == 'LA Lakers', 'TEAM'] = "LAL"
addteamData.loc[addteamData.TEAM == 'Toronto', 'TEAM'] = "TOR"
addteamData.loc[addteamData.TEAM == 'Utah', 'TEAM'] = "UTA"
addteamData.loc[addteamData.TEAM == 'Washington', 'TEAM'] = "WAS"
url3 = "https://www.espn.com/nba/stats/team"
df_list = pd.read_html(url3)
moreteamData = df_list[1]
teamList = df_list[0]
moreteamData = pd.DataFrame(moreteamData)
teamList.columns
moreteamData.columns
df = teamList.join(moreteamData, how='outer')
dfNew = df[['Team', 'STL', 'BLK', 'AST', 'TO', 'FG%']]
dfNew.columns = ['TEAM', 'STL', 'BLK', 'AST', 'TO', 'FG%']
dfNew.loc[dfNew.TEAM == 'Atlanta Hawks', 'TEAM'] = "ATL"
dfNew.loc[dfNew.TEAM == 'Boston Celtics', 'TEAM'] = "BOS"
dfNew.loc[dfNew.TEAM == 'Brooklyn Nets', 'TEAM'] = "BRK"
dfNew.loc[dfNew.TEAM == 'Charlotte Hornets', 'TEAM'] = "CHA"
dfNew.loc[dfNew.TEAM == 'Chicago Bulls', 'TEAM'] = "CHI"
dfNew.loc[dfNew.TEAM == 'Cleveland Cavaliers', 'TEAM'] = "CLE"
dfNew.loc[dfNew.TEAM == 'Dallas Mavericks', 'TEAM'] = "DAL"
dfNew.loc[dfNew.TEAM == 'Denver Nuggets', 'TEAM'] = "DEN"
dfNew.loc[dfNew.TEAM == 'Detroit Pistons', 'TEAM'] = "DET"
dfNew.loc[dfNew.TEAM == 'Golden State Warriors', 'TEAM'] = "GSW"
dfNew.loc[dfNew.TEAM == 'Houston Rockets', 'TEAM'] = "HOU"
dfNew.loc[dfNew.TEAM == 'Indiana Pacers', 'TEAM'] = "IND"
dfNew.loc[dfNew.TEAM == 'LA Clippers', 'TEAM'] = "LAC"
dfNew.loc[dfNew.TEAM == 'Memphis Grizzlies', 'TEAM'] = "MEM"
dfNew.loc[dfNew.TEAM == 'Miami Heat', 'TEAM'] = "MIA"
dfNew.loc[dfNew.TEAM == 'Milwaukee Bucks', 'TEAM'] = "MIL"
dfNew.loc[dfNew.TEAM == 'Minnesota Timberwolves', 'TEAM'] = "MIN"
dfNew.loc[dfNew.TEAM == 'New Orleans Pelicans', 'TEAM'] = "NOP"
dfNew.loc[dfNew.TEAM == 'New York Knicks', 'TEAM'] = "NYK"
dfNew.loc[dfNew.TEAM == 'Oklahoma City Thunder', 'TEAM'] = "OKC"
dfNew.loc[dfNew.TEAM == 'Orlando Magic', 'TEAM'] = "ORL"
dfNew.loc[dfNew.TEAM == 'Philadelphia 76ers', 'TEAM'] = "PHI"
dfNew.loc[dfNew.TEAM == 'Phoenix Suns', 'TEAM'] = "PHO"
dfNew.loc[dfNew.TEAM == 'Portland Trail Blazers', 'TEAM'] = "POR"
dfNew.loc[dfNew.TEAM == 'Sacramento Kings', 'TEAM'] = "SAC"
dfNew.loc[dfNew.TEAM == 'San Antonio Spurs', 'TEAM'] = "SAS"
dfNew.loc[dfNew.TEAM == 'Los Angeles Lakers', 'TEAM'] = "LAL"
dfNew.loc[dfNew.TEAM == 'Toronto Raptors', 'TEAM'] = "TOR"
dfNew.loc[dfNew.TEAM == 'Utah Jazz', 'TEAM'] = "UTA"
dfNew.loc[dfNew.TEAM == 'Washington Wizards', 'TEAM'] = "WAS"
url3 = "https://www.basketball-reference.com/leagues/NBA_2019_ratings.html"
df_list = pd.read_html(url3)
evenmoreteamData = df_list[0]
evenmoreteamData = pd.DataFrame(evenmoreteamData)
evenmoreteamData.columns = evenmoreteamData.columns.droplevel()
evenmoreteamData.loc[
evenmoreteamData.Team == 'Atlanta Hawks', 'Team'] = "ATL"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Boston Celtics', 'Team'] = "BOS"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Brooklyn Nets', 'Team'] = "BRK"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Charlotte Hornets', 'Team'] = "CHA"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Chicago Bulls', 'Team'] = "CHI"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Cleveland Cavaliers', 'Team'] = "CLE"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Dallas Mavericks', 'Team'] = "DAL"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Denver Nuggets', 'Team'] = "DEN"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Detroit Pistons', 'Team'] = "DET"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Golden State Warriors', 'Team'] = "GSW"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Houston Rockets', 'Team'] = "HOU"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Indiana Pacers', 'Team'] = "IND"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Los Angeles Clippers', 'Team'] = "LAC"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Memphis Grizzlies', 'Team'] = "MEM"
evenmoreteamData.loc[evenmoreteamData.Team == 'Miami Heat', 'Team'] = "MIA"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Milwaukee Bucks', 'Team'] = "MIL"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Minnesota Timberwolves', 'Team'] = "MIN"
evenmoreteamData.loc[
evenmoreteamData.Team == 'New Orleans Pelicans', 'Team'] = "NOP"
evenmoreteamData.loc[
evenmoreteamData.Team == 'New York Knicks', 'Team'] = "NYK"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Oklahoma City Thunder', 'Team'] = "OKC"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Orlando Magic', 'Team'] = "ORL"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Philadelphia 76ers', 'Team'] = "PHI"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Phoenix Suns', 'Team'] = "PHO"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Portland Trail Blazers', 'Team'] = "POR"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Sacramento Kings', 'Team'] = "SAC"
evenmoreteamData.loc[
evenmoreteamData.Team == 'San Antonio Spurs', 'Team'] = "SAS"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Los Angeles Lakers', 'Team'] = "LAL"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Toronto Raptors', 'Team'] = "TOR"
evenmoreteamData.loc[evenmoreteamData.Team == 'Utah Jazz', 'Team'] = "UTA"
evenmoreteamData.loc[
evenmoreteamData.Team == 'Washington Wizards', 'Team'] = "WAS"
evenmoreteamData = evenmoreteamData[['Team', 'ORtg', 'DRtg']]
evenmoreteamData.columns = ['TEAM', 'ORtg', 'DRtg']
# CLOSE is the dataframe with everything combined
almostDone = | pd.merge(addteamData, dfNew, on=['TEAM']) | pandas.merge |
import os
import pytest
import pandas as pd
import numpy as np
from collections import OrderedDict
from ..catalog_matching import (crossmatch,
select_min_dist,
post_k2_clean,
find_campaigns,
match_k2_epic_campaign,
extract_extensions,
assign_PMem_mean,
merge_k2_info_and_protocol,
crossmatch_multiple_catalogs,
pick_members_and_produce_k2_search_input,
)
ra = 'RAJ2000'
dec = 'DEJ2000'
def test_pick_members_and_produce_k2_search_input():
#---------------------------------------------------------
# Produce fake data
cross = pd.DataFrame(dict(zip(["RAJ2000_1", "DEJ2000_1",
"somecolumn", "PMem_1",
"PMem_2", "PMem_3"],
[[20, 20, 20],
[20, 20, 20],
["rolf", "hagen", "busch"],
[.1, .8, .9],
[.1, .8, .9],
[.9, np.nan, .9]],
)))
sname, name = "test", "Test"
coords = "1"
series = cross.loc[2,:] #this row should be preserved in result
outfile = ('catalog_matching/matched_catalogs/'
'membership_matches/radec/{}_radec.csv'
.format(sname))
#---------------------------------------------------------
# Call function
res = pick_members_and_produce_k2_search_input(cross, sname,
name, coords=coords)
df = pd.read_csv(outfile, header=None)
#---------------------------------------------------------
# Check if the RA/Dec file is correct:
assert df.loc[0,0] == 20
assert df.loc[0,1] == 20
assert df.shape[0] == 1
assert df.shape[1] == 2
# Remove output file
os.remove(outfile)
# Check if the DataFrame was processed correctly
assert res.shape[0] == 1 # only one member is left
assert (res.loc[2,series.index] == series).all() # input columns are preserved
def test_crossmatch_multiple_catalogs():
#-----------------------------------------------------------
# Create a fake data set
diff = 1.49/3600 # 1.5 arcsec distance
c1 = pd.DataFrame(dict(zip(["RAJ2000_1","DEJ2000_1","PMem_1"],
[[21,20,19],[10,10,10],[.9,.8,.7]])))
c2 = pd.DataFrame(dict(zip(["RAJ2000_2","DEJ2000_2","PMem_2","binary_2"],
[[21,20+diff,19+3*diff],[10,10,10],
[.75,.85,.3],[.1,.02,.11]])))
c3 = pd.DataFrame(dict(zip(["RAJ2000_3","DEJ2000_3","PMem_3"],
[[np.nan,20-diff,19],[10,10,10],[.9,.9,.9]])))
d = {"1":c1, "2":c2, "3":c3}
renamed_catalogs = OrderedDict(sorted(d.items(), key=lambda t: t[0])) # order the dicts, not necessary for performance but helpful for testing
name = "Test"
sname = "test"
#-----------------------------------------------------------
# Call the function
res = crossmatch_multiple_catalogs(renamed_catalogs, name, sname,
arcsec=3., plot=True, union=True,
bijective=True)
#-----------------------------------------------------------
# Do some checks
# Check that the table size is correct
assert res.shape[0] == 5
assert res.shape[1] == 16
# Check that relevant columns are created with the right names/values
assert "DEJ2000_1_2_3" in res.columns.tolist()
assert set(c1.columns.values).issubset(set(res.columns.values))
assert set(c2.columns.values).issubset(set(res.columns.values))
assert set(c3.columns.values).issubset(set(res.columns.values))
# Check that the distance calculation was done correctly
assert res.loc[1, "dist_1_2_3"] == pytest.approx(2.235, rel=.1)
assert res.loc[2, "dist_1_2_3"] == 0.
# Check that NaNs stay NaNs:
assert np.isnan(res.loc[4, "RAJ2000_3"])
# Check individual values and NaNs:
assert res.loc[2, "RAJ2000_1_2_3"] == 19.
assert (res.DEJ2000_1_2_3.values == 10.).all()
assert res.dist_1_2.dropna().shape[0] == 2
assert res.dist_1_2_3.dropna().shape[0] == 2
def test_merge_k2_info_and_protocol():
# Fake data
folder = "catalog_matching/tests/exfiles/"
sname = "tiny"
df = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union_k2.csv')
racols = list(filter(lambda k: ra in k, df.columns.values))
deccols = list(filter(lambda k: dec in k, df.columns.values))
df["{}_mean".format(ra)] = df[racols].mean(axis=1)
df["{}_mean".format(dec)] = df[deccols].mean(axis=1)
# Call function
merge_k2_info_and_protocol(df, "tiny", "mean", folder=folder)
data = pd.read_csv("{}members.txt".format(folder))
assert data.shape[1] == 6
assert data.shape[0] == 1
assert data.nlikelymembers[0] == 7
assert data.LCs[0] == 5
assert data.cluster[0] == "tiny"
# Remove the test file, as it would get longer and longer over time.
os.remove("{}members.txt".format(folder))
def test_assign_PMem_mean():
# Test a working example:
df = pd.DataFrame(index=np.arange(10),
columns=["RAJ2000_1","DEJ2000_1","PMem_1",
"RAJ2000","DEJ2000_holla","PMem_holla",
"RAJ2000_2","DEJ2000_BRm2","PMem_dd",])
df["PMem_dd"] = np.arange(0,1.0,.1)
df["PMem_holla"] = np.arange(0,1.0,.1)
df["PMem_1"] = [.1] * 5 + [np.nan] * 5
df = assign_PMem_mean(df)
assert (df.PMem_mean.tolist()
== pytest.approx([.1/3, .1, .5/3, .7/3, .3, .5, .6, .7, .8, .9]))
def test_extract_extensions():
#---------------------------
# Create fake data
df = pd.DataFrame(columns=["RAJ2000_1","DEJ2000_1","PMem_1",
"RAJ2000","DEJ2000_holla","PMem_holla",
"RAJ2000_2","DEJ2000_BRm2","PMem_dd",])
# Test output as extensions
assert extract_extensions(df, prefix="RAJ2000") == ["1","2"]
assert extract_extensions(df, prefix="DEJ2000") == ["1","holla","BRm2"]
assert extract_extensions(df, prefix="PMem") == ["1","holla","dd"]
# Test output as column names
assert extract_extensions(df, prefix="PMem", retcolnames=True) == ['PMem_1', 'PMem_holla', 'PMem_dd']
assert extract_extensions(df, prefix="RAJ2000", retcolnames=True) == ["RAJ2000_1","RAJ2000_2"]
assert extract_extensions(df, prefix="DEJ2000", retcolnames=True) == ["DEJ2000_1","DEJ2000_holla","DEJ2000_BRm2"]
# Must pass a string-convertible prefix
with pytest.raises(TypeError):
extract_extensions(df, prefix=None)
with pytest.raises(TypeError):
extract_extensions(df, prefix=1.44)
assert set(extract_extensions(df, prefix="", retcolnames=True)) == set(df.columns.tolist()) - {"RAJ2000"}
def test_match_k2_epic_campaign():
# set up fake data
testdf = pd.DataFrame({"RAJ2000":[57.13268195367, 132.8329500, 59.],
"DEJ2000":[24.03288651412, 11.7834400, -25.],
"smh":["blo","blo","blo"]})
# run function
resdf = match_k2_epic_campaign(testdf)
# test some values from the results
assert resdf.Campaign[0] == 4.
assert resdf.Campaign[1] == 5.
assert resdf.Campaign[2] == 16.
assert resdf.Campaign[3] == 18.
assert np.isnan(resdf.Campaign[4])
assert (resdf.smh.values == "blo").all()
assert resdf.loc[4,:].DEJ2000 == -25.
assert resdf.loc[4,:].RAJ2000 == 59.
assert resdf.loc[0,:].EPIC == '211066477'
assert resdf.loc[1,:].EPIC == '211409345'
assert resdf.loc[2,:].EPIC == '211409345'
assert resdf.loc[3,:].EPIC == '211409345'
def test_find_campaigns():
# success modes are tested in match_k2_epic_campaign
# test failure modes
for t1 in [find_campaigns("blo ddd"), find_campaigns("67777 9888"),
find_campaigns("0 nan")]:
assert np.isnan(t1[0][0])
assert np.isnan(t1[1])
def test_crossmatch():
name = 'Tiny Cluster'
sname ='tiny'
a = pd.read_csv('catalog_matching/tests/exfiles/tiny_a.csv')
b = pd.read_csv('catalog_matching/tests/exfiles/tiny_b.csv')
d = pd.read_csv('catalog_matching/tests/exfiles/tiny_d.csv')#modified b
keys = ['a','b']
df = crossmatch(a, b, keys, name, sname, arcsec=5., plot=False)
t = pd.read_csv('catalog_matching/tests/exfiles/crossmatch_result.csv')
assert t.equals(df)
df = crossmatch(a, b, keys, name, sname, arcsec=5., plot=False, union=True)
t = pd.read_csv('catalog_matching/tests/exfiles/crossmatch_result_union.csv')
assert t.equals(df)
df = crossmatch(b, a, keys[::-1], name, sname, arcsec=5., plot=False, union=True)
t = pd.read_csv('catalog_matching/tests/exfiles/crossmatch_result_union_reverse.csv')
assert t.equals(df)
df = crossmatch(a, d, ['a','d'], name, sname, arcsec=5., plot=False, union=True, bijective=True)
t = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union.csv')
assert df.equals(t)
def test_select_min_dist():
name = 'Tiny Cluster'
sname ='tiny'
a = pd.read_csv('catalog_matching/tests/exfiles/tiny_a.csv')
b = pd.read_csv('catalog_matching/tests/exfiles/tiny_b.csv')
c = pd.read_csv('catalog_matching/tests/exfiles/tiny_c.csv')#modified a
d = pd.read_csv('catalog_matching/tests/exfiles/tiny_d.csv')#modified b
df = crossmatch(c,b,['c','b'], name, sname, arcsec=5)
df = select_min_dist(df,['c','b'])
t = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist.csv')
assert df.equals(t)
df = crossmatch(a,d,['a','d'], name, sname, arcsec=5, union=True)
df = select_min_dist(df,['a','d'])
t = pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union.csv')
assert df.equals(t)
def test_post_k2_clean():
"""Test data cleaning and merging procedure
on a tiny fake data set.
"""
# Read in data file and process it:
C = 'tiny'
df = | pd.read_csv('catalog_matching/tests/exfiles/select_min_dist_union_k2.csv') | pandas.read_csv |
import json
import os
from datetime import datetime
import pandas as pd
import pytest
from py.path import local
from pytest import fixture
from socceraction.data.base import MissingDataError
from socceraction.data.opta import (
OptaEventSchema,
OptaGameSchema,
OptaPlayerSchema,
OptaTeamSchema,
)
from socceraction.data.opta.parsers import WhoScoredParser
@fixture()
def whoscored_parser() -> WhoScoredParser:
path = os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
"datasets",
"whoscored",
"1005916.json",
)
return WhoScoredParser(str(path), competition_id=5, season_id=1516, game_id=1005916)
def test_extract_competition_id(tmpdir: local) -> None:
path = os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
"datasets",
"whoscored",
"1005916.json",
)
# Read from parameter is the default
parser = WhoScoredParser(path, competition_id=1234, season_id=1516, game_id=1005916)
assert parser.competition_id == 1234
# Read from stream
parser = WhoScoredParser(path, competition_id=None, season_id=1516, game_id=1005916)
assert parser.competition_id == 5
# Raise error when not in stream
p = tmpdir.join("1005916.json")
p.write(json.dumps({}))
with pytest.raises(MissingDataError):
WhoScoredParser(str(p), competition_id=None, season_id=1516, game_id=1005916)
def test_extract_season_id(tmpdir: local) -> None:
path = os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
"datasets",
"whoscored",
"1005916.json",
)
# Read from parameter is the default
parser = WhoScoredParser(path, competition_id=5, season_id=1234, game_id=1005916)
assert parser.season_id == 1234
# Read from stream
parser = WhoScoredParser(path, competition_id=5, season_id=None, game_id=1005916)
assert parser.season_id == 1516
# Raise error when not in stream
p = tmpdir.join("1005916.json")
p.write(json.dumps({}))
with pytest.raises(MissingDataError):
WhoScoredParser(str(p), competition_id=5, season_id=None, game_id=1005916)
def test_extract_game_id(tmpdir: local) -> None:
path = os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
"datasets",
"whoscored",
"1005916.json",
)
# Read from parameter is the default
parser = WhoScoredParser(path, competition_id=5, season_id=1516, game_id=1234)
assert parser.game_id == 1234
# Read from stream
parser = WhoScoredParser(path, competition_id=5, season_id=1516, game_id=None)
assert parser.game_id == 1005916
# Raise error when not in stream
p = tmpdir.join("1005916.json")
p.write(json.dumps({}))
with pytest.raises(MissingDataError):
WhoScoredParser(str(p), competition_id=5, season_id=1516, game_id=None)
def test_extract_games(whoscored_parser: WhoScoredParser) -> None:
games = whoscored_parser.extract_games()
assert len(games) == 1
assert games[1005916] == {
"game_id": 1005916,
"season_id": 1516,
"competition_id": 5,
"game_day": None,
"game_date": datetime(2015, 8, 23, 19, 45),
"home_team_id": 272,
"away_team_id": 267,
"home_score": 1,
"away_score": 3,
"duration": 96,
"venue": "Carlo Castellani",
"attendance": 7309,
"referee": "<NAME>",
"home_manager": "<NAME>",
"away_manager": "<NAME>",
}
OptaGameSchema.validate(pd.DataFrame.from_dict(games, orient="index"))
def test_extract_teams(whoscored_parser: WhoScoredParser) -> None:
teams = whoscored_parser.extract_teams()
assert len(teams) == 2
assert teams[272] == {
"team_id": 272,
"team_name": "Empoli",
}
assert teams[267] == {
"team_id": 267,
"team_name": "Chievo",
}
OptaTeamSchema.validate(pd.DataFrame.from_dict(teams, orient="index"))
def test_extract_players(whoscored_parser: WhoScoredParser) -> None:
players = whoscored_parser.extract_players()
assert len(players) == 21 + 23
assert players[(1005916, 4444)] == {
"game_id": 1005916,
"team_id": 267,
"player_id": 4444,
"player_name": "<NAME>",
"is_starter": True,
"minutes_played": 96,
"jersey_number": 1,
"starting_position": "GK",
}
OptaPlayerSchema.validate(pd.DataFrame.from_dict(players, orient="index"))
def test_extract_events(whoscored_parser: WhoScoredParser) -> None:
events = whoscored_parser.extract_events()
assert len(events) == 1562
assert events[(1005916, 832925173)] == {
"game_id": 1005916,
"event_id": 832925173,
"period_id": 1,
"team_id": 272,
"player_id": 128778,
"type_id": 1,
"timestamp": datetime(2015, 8, 23, 19, 45, 1),
"minute": 0,
"second": 1,
"outcome": True,
"start_x": 50.9,
"start_y": 48.8,
"end_x": 35.9,
"end_y": 49.8,
"qualifiers": {56: "Back", 140: "35.9", 141: "49.8", 212: "15.8", 213: "3.1"},
"related_player_id": None,
"goal": False,
"shot": False,
"touch": True,
}
df = | pd.DataFrame.from_dict(events, orient="index") | pandas.DataFrame.from_dict |
"""
Example use of vixutil to plot the term structure.
Be sure to run vixutil -r first to download the data.
"""
import vixutil as vutil
import pandas as pd
import logging as logging
import asyncio
import sys
pd.set_option('display.max_rows', 10)
#need to display more than two months of rows
pd.set_option('display.min_rows', 10)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
logger=logging.getLogger()
logger.setLevel(logging.INFO)
vutils=vutil.VixUtilsApi()
weights=vutils.get_vix_futures_constant_maturity_weights()
constant_maturity_term_structure = vutils.get_vix_futures_constant_maturity_term_structure()
cash_vix = vutils.get_cash_vix_term_structure()
futures_term_structure = vutils.get_vix_futures_term_structure()
wide_vix_calendar=vutils.get_vix_futures_constant_maturity_weights()
sep_lines = "_"*25+"\n"
constant_maturity_weights=vutils.get_vix_futures_constant_maturity_weights()
try:
import matplotlib.pyplot as plt
import scipy.stats as bc
except Exception as e:
logging.warning(f"""Exception {e} while trying to plot. matplotlip and scipy.stats
are required to run the plots in this example. Install them into your environment if you want to
see the graphs.""")
sys.exit(-3)
# the nine-month contract has some bad data in it
#futures_term_structure = futures_term_structure.swaplevel(0,1,axis=1).drop(columns=[9]).swaplevel(0, 1, axis=1)
#futures_term_structure.drop(level=1,columns=[9,8],inplace=True)
futures_term_structure[['Close']].plot()
# futures_term_structure[['VIX1M_SPVIXSTR','Close']].plot()
plt.show()
constant_maturity_term_structure[['Close']].plot()
print(f"Constant maturity term structure {constant_maturity_term_structure}")
plt.show()
print(f"Cash vix {cash_vix}")
b=cash_vix['Close'][['VIX3M','VIX','VIX9D']]
b.plot()
plt.show()
#plot the term structure for Feb 16, 2021
day_of_interest = '2021-02-16'
s1 = futures_term_structure.loc[day_of_interest][["Close", "Settlement Date"]]
s2 = constant_maturity_term_structure.loc[day_of_interest][["Close", "Settlement Date"]]
s1.index = pd.Index([ (a,f"{b}") for a,b in s1.index])
s3= | pd.concat([s1,s2]) | pandas.concat |
from __future__ import annotations
import os
import hashlib
from collections import OrderedDict
from typing import List, Optional, Callable, Dict, Any, Union
import pandas as pd
import colorama
import pprint
import json
class Parser:
def __init__(self, hashed_resources_folder: str):
self._hashed_resources_folder = hashed_resources_folder
os.makedirs(self._hashed_resources_folder, exist_ok=True)
self._models_folder = os.path.join(self._hashed_resources_folder, 'all')
os.makedirs(self._models_folder, exist_ok=True)
self._is_parser = True
self._dependencies = {}
self._parser = None
def new_instance(self):
instance = self.__new__(type(self))
instance._is_parser = False
instance._parser = self
d = self.get_hyperparameters(calling_from_pure_parser=True)
for k, v in d.items():
setattr(instance, k, v)
return instance
def load_instance_from_disk_by_hash(self, instance_hash: str, resource_name: Optional[str] = None,
skip_missing=False) -> Union[Parser, None]:
assert self._is_parser is True
if resource_name is None:
hash_folder = os.path.join(self._models_folder, instance_hash)
else:
hash_folder = os.path.join(self._hashed_resources_folder, resource_name, instance_hash)
instance_info_file = os.path.join(hash_folder, 'instance_info.json')
if not os.path.isfile(instance_info_file) and skip_missing:
print('instance not found')
return None
with open(instance_info_file, 'r') as infile:
instance_info = json.load(infile)
instance = self.new_instance()
expected = instance.get_hashable_hyperparameters()
if resource_name is not None:
expected = expected.intersection(set(instance._parser._dependencies[resource_name]))
real = {k for k, v in instance_info.items() if v is not None}
if len(real.difference(expected)) > 0:
print(f'hyperparameters in the set real but not in the set expected: {real.difference(expected)}')
print('skipping the instance')
return None
if len(expected.difference(real)) > 0:
print(f'warning: some parameters are expected but are not found in the loaded instance, they have been set to None')
print(f'hyperparameters in the set expected but not in the set real: {expected.difference(real)}')
for k, v in instance_info.items():
setattr(instance, k, v)
computed_hash = instance.get_instance_hash(resource_name=resource_name)
        # haven't tested properly when this happens; just ignore these cases
if instance_hash != computed_hash:
return None
# assert instance_hash == computed_hash
return instance
def get_hyperparameters(self, calling_from_pure_parser=False):
keys = dir(self)
if calling_from_pure_parser:
keys = [key for key in keys if key not in self.__dict__]
keys = [key for key in keys if not hasattr(Parser, key)]
keys = [key for key in keys if not callable(getattr(self, key))]
keys = [key for key in keys if not key.startswith('_')]
values = [getattr(self, key) for key in keys]
d = dict(zip(keys, values))
return d
def get_hashable_hyperparameters(self, only_non_none=False):
d = self.get_hyperparameters()
if not only_non_none:
hashable_hyperparameters = set(type(self).__dict__.keys()).intersection(d.keys())
else:
hashable_hyperparameters = {k for k, v in type(self).__dict__.items() if v is not None}.intersection(d.keys())
return hashable_hyperparameters
def get_instance_hash(self, resource_name: Optional[str] = None):
assert self._is_parser is False
h = hashlib.sha256()
keys = self.get_hashable_hyperparameters()
d = self.get_hyperparameters()
od = OrderedDict(sorted({k: d[k] for k in keys}.items()))
used_for_hash = dict()
for k, v in od.items():
use_hash = True
if resource_name is not None:
dependencies = self._parser._dependencies[resource_name]
if k not in dependencies:
use_hash = False
if v is None:
use_hash = False
if use_hash:
h.update(str(k).encode('utf-8'))
h.update(str(v).encode('utf-8'))
used_for_hash[k] = v
instance_hash = h.hexdigest()
if resource_name is None:
hash_folder = os.path.join(self._parser._models_folder, instance_hash)
else:
hash_folder = os.path.join(self._parser._hashed_resources_folder, resource_name, instance_hash)
os.makedirs(hash_folder, exist_ok=True)
return instance_hash
def _generate_instance_info_files_for_resource(self, instance: Parser, resource_name: Optional[str] = None):
assert self._is_parser is True
instance_or_resource_hash = instance.get_instance_hash(resource_name=resource_name)
if resource_name is None:
hash_folder = os.path.join(self._models_folder, instance_or_resource_hash)
else:
hash_folder = os.path.join(self._hashed_resources_folder, resource_name, instance_or_resource_hash)
os.makedirs(hash_folder, exist_ok=True)
f = os.path.join(hash_folder, 'instance_info.json')
# print(f'creating {f}')
keys = instance.get_hashable_hyperparameters()
d = instance.get_hyperparameters()
od = OrderedDict(sorted({k: d[k] for k in keys}.items()))
used_for_hash = dict()
for k, v in od.items():
use_hash = True
if resource_name is not None:
dependencies = self._dependencies[resource_name]
if k not in dependencies:
use_hash = False
if use_hash:
used_for_hash[k] = v
with open(f, 'w') as outfile:
json.dump(used_for_hash, outfile, indent=4)
def generate_instance_info_files(self, instances: List[Parser]):
assert self._is_parser is True
resource_names = list(self._dependencies.keys())
for instance in instances:
for resource_name in [None, *resource_names]:
self._generate_instance_info_files_for_resource(instance, resource_name=resource_name)
def register_new_resource(self, name: str, dependencies: List[str]):
assert self._is_parser is True
l = self.get_hashable_hyperparameters()
for k in dependencies:
assert k in l
self._dependencies[name] = dependencies
path = os.path.join(self._hashed_resources_folder, name)
os.makedirs(path, exist_ok=True)
def get_dependencies_for_resources(self, name: str):
if self._is_parser:
parser = self
else:
parser = self._parser
return parser._dependencies[name]
def get_resources_path(self, resource_name: Optional[str] = None):
if self._is_parser:
parser = self
else:
parser = self._parser
if resource_name is None:
return os.path.join(parser._models_folder, self.get_instance_hash())
else:
return os.path.join(parser._hashed_resources_folder, resource_name, self.get_instance_hash(resource_name))
@staticmethod
def cartesian_product(d):
index = pd.MultiIndex.from_product(d.values(), names=d.keys())
return pd.DataFrame(index=index).reset_index()
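    # Example (added note, not in the original code): with
    #   d = {'lr': [0.1, 0.01], 'depth': [2, 3]}
    # cartesian_product(d) returns a 4-row DataFrame with columns 'lr' and
    # 'depth', one row per combination -- a convenient input for
    # get_instances_from_df / get_instances_from_dictionary below.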
def get_instances_from_df(self, df):
assert self._is_parser is True
instances = []
for _, row in df.iterrows():
instance = self.new_instance()
d = row.to_dict()
for k, v in d.items():
setattr(instance, k, v)
instances.append(instance)
hashes = [instance.get_instance_hash() for instance in instances]
if len(hashes) != len(set(hashes)):
print(f'{colorama.Fore.YELLOW}warning: some instances differ only by non-hashable parameters, review your '
f'list to avoid unnecessary computations', colorama.Fore.RESET)
return instances
def get_instances_from_dictionary(self, d):
assert self._is_parser is True
df = self.cartesian_product(d)
return self.get_instances_from_df(df)
def get_resource_names(self):
if self._is_parser:
parser = self
else:
parser = self._parser
return sorted(list(parser._dependencies.keys()))
@staticmethod
def get_resources(instances: List[Parser], resource_name: str) -> List[Parser]:
unique = {}
for instance in instances:
resource_hash = instance.get_instance_hash(resource_name)
unique[resource_hash] = instance
# maybe here I should set all the other hyperparameters to None
return list(unique.values())
@staticmethod
def get_projections(instances: List[Parser], hyperparameter_names: List[str], resource_name: Optional[str] = None):
df = pd.DataFrame(columns=hyperparameter_names)
rows = []
instance_hashes = []
        # maybe I should add a test to check that all the keys are equal among instances, as expected
for instance in instances:
d = instance.get_hyperparameters()
row = {name: d[name] for name in hyperparameter_names}
rows.append(row)
instance_hashes.append(instance.get_instance_hash(resource_name))
df = df.append(rows)
multi_index = pd.MultiIndex.from_arrays([df.index.to_list(), instance_hashes], names=('incremental index', 'instance_hash'))
df.index = multi_index
df.drop_duplicates(inplace=True)
return df
@staticmethod
def get_df_of_instances(instances: List[Parser], resource_name: Optional[str] = None):
if len(instances) == 0:
return | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#RIL Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
tf = 201
#Parameters for residue decomposition (Source: De Rosa et al., 2017)
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
#df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S1')
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
t = range(0,tf,1)
#c_loss_S1 = df1['C_loss'].values
c_firewood_energy_S2 = df2['Firewood_other_energy_use'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S2
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
tf = 201
t = np.arange(tf)
def decomp_S2(t,remainAGB_S2):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2
#set zero matrix
output_decomp_S2 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2[i:,i] = decomp_S2(t[:len(t)-i],remain_part_S2)
print(output_decomp_S2[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2[:,i] = np.diff(output_decomp_S2[:,i])
i = i + 1
print(subs_matrix_S2[:,:4])
print(len(subs_matrix_S2))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2 = subs_matrix_S2.clip(max=0)
print(subs_matrix_S2[:,:4])
#make the results as absolute values
subs_matrix_S2 = abs(subs_matrix_S2)
print(subs_matrix_S2[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2)
subs_matrix_S2 = np.vstack((zero_matrix_S2, subs_matrix_S2))
print(subs_matrix_S2[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2 = (tf,1)
decomp_tot_S2 = np.zeros(matrix_tot_S2)
i = 0
while i < tf:
decomp_tot_S2[:,0] = decomp_tot_S2[:,0] + subs_matrix_S2[:,i]
i = i + 1
print(decomp_tot_S2[:,0])
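# Hedged refactoring sketch (added; not used by this script): the S2/S2_C/E/E_C
# blocks repeat the same cohort bookkeeping. A helper like the one below would
# reproduce decomp_tot_*[:,0] from a column of remaining AGB, assuming the same
# decay form and the same diff/clip/abs/zero-padding steps as above.
def yearly_decomp_emissions(remain_agb, a=a, b=b, tf=tf):
    t_loc = np.arange(tf)
    out = np.zeros((tf, len(remain_agb)))
    for i, remain in enumerate(remain_agb):
        out[i:, i] = (1 - (1 - np.exp(-a * t_loc[:tf - i]))**b) * remain
    subs = np.abs(np.diff(out, axis=0).clip(max=0))           # yearly release per cohort
    subs = np.vstack((np.zeros((1, len(remain_agb))), subs))  # no emission in year 0
    return subs.sum(axis=1)                                   # total yearly emissions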
#S2_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_S2')
tf = 201
t = np.arange(tf)
def decomp_S2_C(t,remainAGB_S2_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_C
#set zero matrix
output_decomp_S2_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_C[i:,i] = decomp_S2_C(t[:len(t)-i],remain_part_S2_C)
print(output_decomp_S2_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2_C[:,i] = np.diff(output_decomp_S2_C[:,i])
i = i + 1
print(subs_matrix_S2_C[:,:4])
print(len(subs_matrix_S2_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_C = subs_matrix_S2_C.clip(max=0)
print(subs_matrix_S2_C[:,:4])
#make the results as absolute values
subs_matrix_S2_C = abs(subs_matrix_S2_C)
print(subs_matrix_S2_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_C)
subs_matrix_S2_C = np.vstack((zero_matrix_S2_C, subs_matrix_S2_C))
print(subs_matrix_S2_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_C = (tf,1)
decomp_tot_S2_C = np.zeros(matrix_tot_S2_C)
i = 0
while i < tf:
decomp_tot_S2_C[:,0] = decomp_tot_S2_C[:,0] + subs_matrix_S2_C[:,i]
i = i + 1
print(decomp_tot_S2_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_E(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E)
subs_matrix_E = np.vstack((zero_matrix_E, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#E_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_E')
tf = 201
t = np.arange(tf)
def decomp_E_C(t,remainAGB_E_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E_C
#set zero matrix
output_decomp_E_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E_C[i:,i] = decomp_E_C(t[:len(t)-i],remain_part_E_C)
print(output_decomp_E_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_C = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_E_C[:,i] = np.diff(output_decomp_E_C[:,i])
i = i + 1
print(subs_matrix_E_C[:,:4])
print(len(subs_matrix_E_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_C = subs_matrix_E_C.clip(max=0)
print(subs_matrix_E_C[:,:4])
#make the results as absolute values
subs_matrix_E_C = abs(subs_matrix_E_C)
print(subs_matrix_E_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_C)
subs_matrix_E_C = np.vstack((zero_matrix_E_C, subs_matrix_E_C))
print(subs_matrix_E_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_C = (tf,1)
decomp_tot_E_C = np.zeros(matrix_tot_E_C)
i = 0
while i < tf:
decomp_tot_E_C[:,0] = decomp_tot_E_C[:,0] + subs_matrix_E_C[:,i]
i = i + 1
print(decomp_tot_E_C[:,0])
#plotting
t = np.arange(0,tf)
#plt.plot(t,decomp_tot_S1,label='S1')
plt.plot(t,decomp_tot_S2,label='S2')
plt.plot(t,decomp_tot_E,label='E')
plt.plot(t,decomp_tot_S2_C,label='S2_C')
plt.plot(t,decomp_tot_E_C,label='E_C')
plt.xlim(0,200)
plt.legend(loc='best', frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
#product lifetime
#building materials
B = 35
TestDSM2 = DynamicStockModel(t = df2['Year'].values, i = df2['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
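# (Added note) Both DynamicStockModel instances are inflow-driven: the
# compute_s_c_inflow_driven() calls below spread each year's inflow of wood
# products over a normal lifetime distribution (mean 35 y, sd 0.3*35 = 10.5 y),
# and outflows, total stock and stock change are then derived from that
# stock-by-cohort matrix.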
CheckStr2, ExitFlag2 = TestDSM2.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()
Stock_by_cohort2, ExitFlag2 = TestDSM2.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()
S2, ExitFlag2 = TestDSM2.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()
O_C2, ExitFlag2 = TestDSM2.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()
O2, ExitFlag2 = TestDSM2.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()
DS2, ExitFlag2 = TestDSM2.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()
Bal2, ExitFlag2 = TestDSM2.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()
#print output flow
print(TestDSM2.o)
print(TestDSME.o)
plt.xlim(0,100)
plt.show()
#%%
#Step (5): Biomass growth
# RIL_Scenario biomass growth, following RIL disturbance
#recovery time, follow the one by Alice-guier
#H = [M, E, C_M, C_E]
#LD0 = [M, E, C_M, C_E]
H = [5.78, 7.71, 5.78, 7.71]
LD0 = [53.46-5.78, 53.46-7.71, 29.29-5.78, 29.29-7.71]
s = 1.106
#RIL
RT = ((H[0] + LD0[0])*100/initAGB)**s
print(RT)
#growth per year (Mg C/ha.yr)
gpy = (H[0] + LD0[0])/RT
print(gpy)
tf_RIL_S1 = 36
A1 = range(0,tf_RIL_S1,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_S1(A1):
return 44/12*1000*gpy*A1
seq_RIL = np.array([Y_RIL_S1(A1i) for A1i in A1])
print(len(seq_RIL))
print(seq_RIL)
##3 times 35-year cycle of new AGB following logging (RIL)
counter_35y = range(0,6,1)
y_RIL = []
for i in counter_35y:
y_RIL.append(seq_RIL)
flat_list_RIL = []
for sublist in y_RIL:
for item in sublist:
flat_list_RIL.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL = flat_list_RIL[:len(flat_list_RIL)-15]
print(flat_list_RIL)
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL = [p - q for q, p in zip(flat_list_RIL, flat_list_RIL[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL = [0 if i < 0 else i for i in flat_list_RIL]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL = [ -x for x in flat_list_RIL]
print(flat_list_RIL)
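# Side note (added): the zip-based differencing above is equivalent to np.diff.
# A sketch of the same transformation as a reusable function, assuming the input
# is the cumulative regrowth series; it is not called here.
def yearly_sequestration(cumulative):
    steps = np.diff(np.asarray(cumulative, dtype=float))
    steps[steps < 0] = 0.0                     # no uptake across replanting years
    return -np.concatenate(([0.0], steps))     # negative sign denotes sequestration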
#RIL_C
RT_C = ((H[2] + LD0[2])*100/initAGB)**s
print(RT_C)
#growth per year (Mg C/ha.yr)
gpy_C = (H[2] + LD0[2])/RT_C
print(gpy_C)
tf_RIL_C = 36
A1 = range(0,tf_RIL_C,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_C(A1):
return 44/12*1000*gpy_C*A1
seq_RIL_C = np.array([Y_RIL_C(A1i) for A1i in A1])
print(len(seq_RIL_C))
print(seq_RIL_C)
##3 times 35-year cycle of new AGB following logging (RIL)
counter_35y = range(0,6,1)
y_RIL_C = []
for i in counter_35y:
y_RIL_C.append(seq_RIL_C)
flat_list_RIL_C = []
for sublist_C in y_RIL_C:
for item in sublist_C:
flat_list_RIL_C.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL_C = flat_list_RIL_C[:len(flat_list_RIL_C)-15]
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL_C, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL_C = [p - q for q, p in zip(flat_list_RIL_C, flat_list_RIL_C[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL_C = [0 if i < 0 else i for i in flat_list_RIL_C]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL_C.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL_C = [ -x for x in flat_list_RIL_C]
print(flat_list_RIL_C)
#%%
#Step (5_1): Biomass C sequestration of the remaining unharvested block
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
df2_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_S2')
dfE= pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
dfE_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_E')
t = range(0,tf,1)
RIL_seq_S2= df2['RIL_seq'].values
RIL_seq_C_S2= df2_C['RIL_seq'].values
RIL_seq_E = dfE['RIL_seq'].values
RIL_seq_C_E = dfE_C['RIL_seq'].values
#%%
#Step (6): post-harvest processing of wood
#post-harvest wood processing
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
t = range(0,tf,1)
PH_Emissions_HWP2 = df2['PH_Emissions_HWP'].values
PH_Emissions_HWPE = dfE ['PH_Emissions_HWP'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life (years)
k = (np.log(2))/hl
#S2
df2_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2(t,Landfill_decomp_CH4_S2):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_S2
#set zero matrix
output_decomp_CH4_S2 = np.zeros((len(t),len(df2_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2 in enumerate(df2_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S2[i:,i] = decomp_CH4_S2(t[:len(t)-i],remain_part_CH4_S2)
print(output_decomp_CH4_S2[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2 = np.zeros((len(t)-1,len(df2_CH4['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S2[:,i] = np.diff(output_decomp_CH4_S2[:,i])
i = i + 1
print(subs_matrix_CH4_S2[:,:4])
print(len(subs_matrix_CH4_S2))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2 = subs_matrix_CH4_S2.clip(max=0)
print(subs_matrix_CH4_S2[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2 = abs(subs_matrix_CH4_S2)
print(subs_matrix_CH4_S2[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2 = np.zeros((len(t)-200,len(df2_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2)
subs_matrix_CH4_S2 = np.vstack((zero_matrix_CH4_S2, subs_matrix_CH4_S2))
print(subs_matrix_CH4_S2[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S2 = (tf,1)
decomp_tot_CH4_S2 = np.zeros(matrix_tot_CH4_S2)
i = 0
while i < tf:
decomp_tot_CH4_S2[:,0] = decomp_tot_CH4_S2[:,0] + subs_matrix_CH4_S2[:,i]
i = i + 1
print(decomp_tot_CH4_S2[:,0])
#E
dfE_CH4 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_CH4_E(t,Landfill_decomp_CH4_E):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CH4_E
#set zero matrix
output_decomp_CH4_E = np.zeros((len(t),len(dfE_CH4['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_E in enumerate(dfE_CH4['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_E[i:,i] = decomp_CH4_E(t[:len(t)-i],remain_part_CH4_E)
print(output_decomp_CH4_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_E = np.zeros((len(t)-1,len(dfE_CH4['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_E[:,i] = np.diff(output_decomp_CH4_E[:,i])
i = i + 1
print(subs_matrix_CH4_E[:,:4])
print(len(subs_matrix_CH4_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_E = subs_matrix_CH4_E.clip(max=0)
print(subs_matrix_CH4_E[:,:4])
#make the results as absolute values
subs_matrix_CH4_E = abs(subs_matrix_CH4_E)
print(subs_matrix_CH4_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_E = np.zeros((len(t)-200,len(dfE_CH4['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E)
subs_matrix_CH4_E = np.vstack((zero_matrix_CH4_E, subs_matrix_CH4_E))
print(subs_matrix_CH4_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_E = (tf,1)
decomp_tot_CH4_E = np.zeros(matrix_tot_CH4_E)
i = 0
while i < tf:
decomp_tot_CH4_E[:,0] = decomp_tot_CH4_E[:,0] + subs_matrix_CH4_E[:,i]
i = i + 1
print(decomp_tot_CH4_E[:,0])
#plotting
t = np.arange(0,tf)
#plt.plot(t,decomp_tot_CH4_S1,label='S1')
plt.plot(t,decomp_tot_CH4_S2,label='S2')
plt.plot(t,decomp_tot_CH4_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life (years)
k = (np.log(2))/hl
#S2
df2_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
tf = 201
t = np.arange(tf)
def decomp_CO2_S2(t,Landfill_decomp_CO2_S2):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_S2
#set zero matrix
output_decomp_CO2_S2 = np.zeros((len(t),len(df2_CO2['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2 in enumerate(df2_CO2['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S2[i:,i] = decomp_CO2_S2(t[:len(t)-i],remain_part_CO2_S2)
print(output_decomp_CO2_S2[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S2 = np.zeros((len(t)-1,len(df2_CO2['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S2[:,i] = np.diff(output_decomp_CO2_S2[:,i])
i = i + 1
print(subs_matrix_CO2_S2[:,:4])
print(len(subs_matrix_CO2_S2))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2 = subs_matrix_CO2_S2.clip(max=0)
print(subs_matrix_CO2_S2[:,:4])
#make the results as absolute values
subs_matrix_CO2_S2 = abs(subs_matrix_CO2_S2)
print(subs_matrix_CO2_S2[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2 = np.zeros((len(t)-200,len(df2_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2)
subs_matrix_CO2_S2 = np.vstack((zero_matrix_CO2_S2, subs_matrix_CO2_S2))
print(subs_matrix_CO2_S2[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2 = (tf,1)
decomp_tot_CO2_S2 = np.zeros(matrix_tot_CO2_S2)
i = 0
while i < tf:
decomp_tot_CO2_S2[:,0] = decomp_tot_CO2_S2[:,0] + subs_matrix_CO2_S2[:,i]
i = i + 1
print(decomp_tot_CO2_S2[:,0])
#E
dfE_CO2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_CO2_E(t,Landfill_decomp_CO2_E):
return (1-(1-np.exp(-k*t)))*Landfill_decomp_CO2_E
#set zero matrix
output_decomp_CO2_E = np.zeros((len(t),len(dfE_CO2['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_E in enumerate(dfE_CO2['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_E[i:,i] = decomp_CO2_E(t[:len(t)-i],remain_part_CO2_E)
print(output_decomp_CO2_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_E = np.zeros((len(t)-1,len(dfE_CO2['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_E[:,i] = np.diff(output_decomp_CO2_E[:,i])
i = i + 1
print(subs_matrix_CO2_E[:,:4])
print(len(subs_matrix_CO2_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_E = subs_matrix_CO2_E.clip(max=0)
print(subs_matrix_CO2_E[:,:4])
#make the results as absolute values
subs_matrix_CO2_E = abs(subs_matrix_CO2_E)
print(subs_matrix_CO2_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_E = np.zeros((len(t)-200,len(dfE_CO2['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_E)
subs_matrix_CO2_E = np.vstack((zero_matrix_CO2_E, subs_matrix_CO2_E))
print(subs_matrix_CO2_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_E = (tf,1)
decomp_tot_CO2_E = np.zeros(matrix_tot_CO2_E)
i = 0
while i < tf:
decomp_tot_CO2_E[:,0] = decomp_tot_CO2_E[:,0] + subs_matrix_CO2_E[:,i]
i = i + 1
print(decomp_tot_CO2_E[:,0])
#plotting
t = np.arange(0,tf)
#plt.plot(t,decomp_tot_CO2_S1,label='S1')
plt.plot(t,decomp_tot_CO2_S2,label='S2')
plt.plot(t,decomp_tot_CO2_E,label='E')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
Emissions_S2 = [c_firewood_energy_S2, decomp_tot_S2[:,0], TestDSM2.o, PH_Emissions_HWP2, decomp_tot_CO2_S2[:,0]]
Emissions_E = [c_firewood_energy_E, c_pellets_E, decomp_tot_E[:,0], TestDSME.o, PH_Emissions_HWPE, decomp_tot_CO2_E[:,0]]
Emissions_S2_C = [c_firewood_energy_S2, decomp_tot_S2_C[:,0], TestDSM2.o, PH_Emissions_HWP2, decomp_tot_CO2_S2[:,0]]
Emissions_E_C = [c_firewood_energy_E, c_pellets_E, decomp_tot_E_C[:,0], TestDSME.o, PH_Emissions_HWPE, decomp_tot_CO2_E[:,0]]
Emissions_RIL_S2 = [sum(x) for x in zip(*Emissions_S2)]
Emissions_RIL_E = [sum(x) for x in zip(*Emissions_E)]
Emissions_RIL_S2_C = [sum(x) for x in zip(*Emissions_S2_C)]
Emissions_RIL_E_C = [sum(x) for x in zip(*Emissions_E_C)]
#CH4_S2
Emissions_CH4_RIL_S2 = decomp_tot_CH4_S2[:,0]
#CH4_E
Emissions_CH4_RIL_E = decomp_tot_CH4_E[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
#Col2_S1 = Emissions_RIL_S1
Col2_S2 = Emissions_RIL_S2
Col2_E = Emissions_RIL_E
Col2_S2_C = Emissions_RIL_S2_C
Col2_E_C = Emissions_RIL_E_C
#Col3_1 = Emissions_CH4_RIL_S1
Col3_2 = Emissions_CH4_RIL_S2
Col3_E = Emissions_CH4_RIL_E
Col4 = Emission_ref
Col5_2 = [x + y for x, y in zip(flat_list_RIL, RIL_seq_S2)]
Col5_E = [x + y for x, y in zip(flat_list_RIL, RIL_seq_E)]
Col5_C_2 = [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_S2)]
Col5_C_E = [x + y for x, y in zip(flat_list_RIL_C, RIL_seq_C_E)]
df2 = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2,'kg_CH4':Col3_2,'kg_CO2_seq':Col5_2,'emission_ref':Col4})
dfE = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E,'kg_CH4':Col3_E,'kg_CO2_seq':Col5_E,'emission_ref':Col4})
df2_C = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2_C,'kg_CH4':Col3_2,'kg_CO2_seq':Col5_C_2,'emission_ref':Col4})
dfE_C = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E_C,'kg_CH4':Col3_E,'kg_CO2_seq':Col5_C_E,'emission_ref':Col4})
writer = pd.ExcelWriter('emissions_seq_RIL_StLF.xlsx', engine = 'xlsxwriter')
df2.to_excel(writer, sheet_name = 'RIL_S2', header=True, index=False)
dfE.to_excel(writer, sheet_name = 'RIL_E', header=True, index=False)
df2_C.to_excel(writer, sheet_name = 'RIL_C_S2', header=True, index=False)
dfE_C.to_excel(writer, sheet_name = 'RIL_C_E', header=True, index=False)
writer.save()
writer.close()
#%%
## DYNAMIC LCA (wood-based scenarios)
# Step (10): Set General Parameters for Dynamic LCA calculation
aCH4 = 0.129957e-12; # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12; # methane - lifetime (years)
aCO2 = 0.0018088e-12; # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]; # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186]; # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217; # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #until 202 because we want to get the DCF(t-i) until DCF(201) to determine the impact from the emission from the year 200 (There is no DCF(0))
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(0,tf,1)
## CO2 calculation formula
# time dependant atmospheric load for CO2, Bern model
def C_CO2(t):
return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])
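# (Added check) a0Bern + sum(aBern) = 0.217 + 0.259 + 0.338 + 0.186 = 1.0,
# so C_CO2(0) = 1: the full CO2 pulse is airborne at the moment of emission and
# decays toward the a0Bern = 0.217 baseline afterwards.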
output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)
## CH4 calculation formula
# time dependant atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
return np.exp(-t/TauCH4)
output_CH4 = np.array([C_CH4(ti) for ti in t])
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()
output_CH4.size
#%%
#determine the C(t) for CO2
s = []
t = np.arange(0,tf,1)
for i in t:
s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, t, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
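#illustrative check (added, not in the original script): the ratio of cumulative DCFs over a
#100-year horizon gives an implied GWP100 for CH4; with these parameters it is lower than
#published IPCC values mainly because indirect CH4 effects are not modelled here
AGWP_CO2_100 = np.sum(DCF_inst_CO2[:100])
AGWP_CH4_100 = np.sum(DCF_inst_CH4[:100])
print('implied GWP100 of CH4:', AGWP_CH4_100 / AGWP_CO2_100)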
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
##read wood-based data
#read S2
df = pd.read_excel('emissions_seq_RIL_StLF.xlsx', 'RIL_S2')
emission_CO2_S2 = df['kg_CO2'].tolist()
emission_CH4_S2 = df['kg_CH4'].tolist()
emission_CO2_seq_S2 = df['kg_CO2_seq'].tolist()
#read S2_C
df = pd.read_excel('emissions_seq_RIL_StLF.xlsx', 'RIL_C_S2')
emission_CO2_S2_C = df['kg_CO2'].tolist()
emission_CH4_S2_C = df['kg_CH4'].tolist()
emission_CO2_seq_S2_C = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read E
df = pd.read_excel('emissions_seq_RIL_StLF.xlsx', 'RIL_E') # can also index sheet by name or fetch all sheets
emission_CO2_E = df['kg_CO2'].tolist()
emission_CH4_E = df['kg_CH4'].tolist()
emission_CO2_seq_E = df['kg_CO2_seq'].tolist()
#read E_C
df = pd.read_excel('emissions_seq_RIL_StLF.xlsx', 'RIL_C_E') # can also index sheet by name or fetch all sheets
emission_CO2_E_C = df['kg_CO2'].tolist()
emission_CH4_E_C = df['kg_CH4'].tolist()
emission_CO2_seq_E_C = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
#read S2
df = | pd.read_excel('RIL_StLF.xlsx', 'NonRW_RIL_S2') | pandas.read_excel |
# ActivitySim
# See full license in LICENSE.txt.
import logging
import orca
import numpy as np
import pandas as pd
from activitysim.core.util import reindex
logger = logging.getLogger(__name__)
@orca.table()
def tours(non_mandatory_tours, mandatory_tours, tdd_alts):
non_mandatory_df = non_mandatory_tours.local
mandatory_df = mandatory_tours.local
# don't expect indexes to overlap
assert len(non_mandatory_df.index.intersection(mandatory_df.index)) == 0
    # expect non-overlapping indexes (so the trip ids don't change)
assert len(np.intersect1d(non_mandatory_df.index, mandatory_df.index, assume_unique=True)) == 0
tours = pd.concat([non_mandatory_tours.to_frame(),
mandatory_tours.to_frame()],
ignore_index=False)
    # add the start, end, and duration here for future use
chosen_tours = tdd_alts.to_frame().loc[tours.tour_departure_and_duration]
chosen_tours.index = tours.index
df = pd.concat([tours, chosen_tours], axis=1)
assert df.index.name == 'tour_id'
# replace table function with dataframe
orca.add_table('tours', df)
return df
@orca.table()
def mandatory_tours_merged(mandatory_tours, persons_merged):
return orca.merge_tables(mandatory_tours.name,
[mandatory_tours, persons_merged])
@orca.table()
def non_mandatory_tours_merged(non_mandatory_tours, persons_merged):
tours = non_mandatory_tours
return orca.merge_tables(tours.name, tables=[
tours, persons_merged])
@orca.table()
def tours_merged(tours, persons_merged):
return orca.merge_tables(tours.name, tables=[
tours, persons_merged])
# broadcast persons onto tours using the person_id
orca.broadcast('persons', 'non_mandatory_tours',
cast_index=True, onto_on='person_id')
orca.broadcast('persons_merged', 'non_mandatory_tours',
cast_index=True, onto_on='person_id')
orca.broadcast('persons_merged', 'tours', cast_index=True, onto_on='person_id')
@orca.column("tours")
def sov_available(tours):
# FIXME
return pd.Series(1, index=tours.index)
@orca.column("tours")
def hov2_available(tours):
# FIXME
return pd.Series(1, index=tours.index)
@orca.column("tours")
def hov2toll_available(tours):
# FIXME
return pd.Series(1, index=tours.index)
@orca.column("tours")
def hov3_available(tours):
# FIXME
return pd.Series(1, index=tours.index)
@orca.column("tours")
def sovtoll_available(tours):
# FIXME
return pd.Series(1, index=tours.index)
@orca.column("tours")
def drive_local_available(tours):
# FIXME
return pd.Series(1, index=tours.index)
@orca.column("tours")
def drive_lrf_available(tours):
# FIXME
return | pd.Series(1, index=tours.index) | pandas.Series |
# -*- coding: utf-8 -*-
"""SOUTH DAKOTA Arrest Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10iVfY_TbBf7JUU4Mba4E3dlHlHjy5Sr_
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab
from math import pi
from google.colab import files
uploaded = files.upload()
data_path = 'SDArrests.csv'
arrests = pd.read_csv(data_path, index_col=0)
arrests.head()
#arrests.isnull().sum()
newArrests = arrests.fillna(" ")
newArrests.isnull().sum()
data_path2 = "Race and Ethnicity.csv"
data = pd.read_csv(data_path2, index_col=5)
data.head()
population = data.loc[2018, ['Race', 'Population', 'share']]
population
transposedArrests = np.transpose(newArrests)
clean = transposedArrests['Total'].loc['Asian':'Unknown']
clean
r = ['White', 'Black', 'Indian', 'Asian', 'Islander', 'Other']
rawData = {'greenBars':[10357, 1621, 6229, 163, 36, 2269], 'blueBars':[718783+22087, 18895+162, 74907+3161, 14791+232, 51, 271+5511+20554+2830]}
df = pd.DataFrame(rawData)
totals = [i+j for i,j in zip(df['greenBars'], df['blueBars'])]
greenBars = [i / j * 100 for i,j in zip(df['greenBars'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['blueBars'], totals)]
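# note (added for clarity): greenBars and blueBars are shares of each race's total population,
# so for every race the two series sum to 100 and the stacked bars reach full height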
fig, ax=plt.subplots(figsize = (5,10))
barWidth = 0.85
names = r
plt.bar(r, greenBars, color='#b5ffb9', edgecolor='white', width=barWidth)
plt.bar(r, blueBars, bottom=greenBars, color='#a3acff', edgecolor='white', width=barWidth)
plt.title('Arrests as % of Total Population for each Race in South Dakota')
plt.xticks(r, names)
plt.xlabel("Race")
plt.ylabel("%")
plt.show()
r = ['White', 'Black', 'Other']
rawData = {'greenBars':[10357, 1621, 6229+163+36+2269], 'blueBars':[718783+22087, 18895+162, 74907+3161+14791+232+51+271+5511+20554+2830]}
df = pd.DataFrame(rawData)
totals = [i+j for i,j in zip(df['greenBars'], df['blueBars'])]
greenBars = [i / j * 100 for i,j in zip(df['greenBars'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['blueBars'], totals)]
fig, ax=plt.subplots(figsize = (5,10))
barWidth = 0.85
names = r
plt.bar(r, greenBars, color='#7F8DA8', edgecolor='white', width=barWidth)
plt.bar(r, blueBars, bottom=greenBars, color='#FADFC3', edgecolor='white', width=barWidth)
plt.ylim(0,30)
plt.title('Arrests as % of Population in South Dakota', fontsize=14)
plt.xticks(r, names)
plt.xlabel("Race", fontsize=14)
plt.ylabel("Percentage", fontsize=14)
plt.show()
data_path = 'southdakota.csv'
prison = | pd.read_csv(data_path, index_col=0) | pandas.read_csv |
import pytest
from cellrank.tl._colors import _map_names_and_colors, _create_categorical_colors
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from matplotlib.colors import is_color_like
class TestColors:
def test_create_categorical_colors_too_many_colors(self):
with pytest.raises(ValueError):
_create_categorical_colors(1000)
def test_create_categorical_colors_no_categories(self):
c = _create_categorical_colors(0)
assert c == []
def test_create_categorical_colors_neg_categories(self):
with pytest.raises(RuntimeError):
_create_categorical_colors(-1)
def test_create_categorical_colors_normal_run(self):
colors = _create_categorical_colors(62)
assert len(colors) == 62
assert all(map(lambda c: isinstance(c, str), colors))
assert all(map(lambda c: is_color_like(c), colors))
class TestMappingColors:
def test_mapping_colors_not_categorical(self):
query = pd.Series(["foo", "bar", "baz"], dtype="str")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(TypeError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_size(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", np.nan, "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_different_index(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category", index=[2, 3, 4])
reference = pd.Series(["foo", "bar", "baz"], dtype="category", index=[1, 2, 3])
with pytest.raises(ValueError):
_map_names_and_colors(reference, query)
def test_mapping_colors_invalid_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(
reference, query, colors_reference=["red", "green", "foo"]
)
def test_mapping_colors_too_few_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query, colors_reference=["red", "green"])
def test_mapping_colors_simple_1(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"]).astype("category")
expected = pd.Series(["a_1", "a_2", "b"])
expected_index = pd.Index(["a", "b", "d"])
res = _map_names_and_colors(x, y)
assert isinstance(res, pd.Series)
np.testing.assert_array_equal(res.values, expected.values)
np.testing.assert_array_equal(res.index.values, expected_index.values)
def test_mapping_colors_simple_2(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res = _map_names_and_colors(reference, query)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
def test_mapping_colors_simple_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "green", "blue"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_too_many_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "green", "blue", "black"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_different_color_representation(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=[(1, 0, 0), "green", (0, 0, 1, 0)]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#008000", "#0000ff"]
def test_mapping_colors_non_unique_colors(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
res, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert isinstance(res, pd.Series)
assert len(res) == 3
assert is_categorical_dtype(res)
assert isinstance(c, list)
assert c == ["#ff0000", "#ff0000", "#ff0000"]
def test_mapping_colors_same_reference(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "foo", "foo"], dtype="category")
r, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert list(r.index) == ["bar", "baz", "foo"]
assert list(r.values) == ["foo_1", "foo_2", "foo_3"]
assert c == ["#b20000", "#d13200", "#f07300"]
def test_mapping_colors_diff_query_reference(self):
query = pd.Series(["bar", "bar", "bar"], dtype="category")
reference = pd.Series(["foo", "foo", "foo"], dtype="category")
r, c = _map_names_and_colors(
reference, query, colors_reference=["red", "red", "red"]
)
assert list(r.index) == ["bar"]
assert list(r.values) == ["foo"]
assert c == ["#ff0000"]
def test_mapping_colors_empty(self):
query = pd.Series([], dtype="category")
reference = pd.Series([], dtype="category")
r = _map_names_and_colors(reference, query)
assert isinstance(r, pd.Series)
assert is_categorical_dtype(r)
def test_mapping_colors_empty_with_color(self):
query = pd.Series([], dtype="category")
reference = pd.Series([], dtype="category")
r, c = _map_names_and_colors(reference, query, colors_reference=[])
assert isinstance(r, pd.Series)
assert is_categorical_dtype(r)
assert isinstance(c, list)
assert len(c) == 0
def test_mapping_colors_negative_en_cutoff(self):
query = pd.Series(["foo", "bar", "baz"], dtype="category")
reference = pd.Series(["foo", "bar", "baz"], dtype="category")
with pytest.raises(ValueError):
_map_names_and_colors(reference, query, en_cutoff=-1)
def test_mapping_colors_0_en_cutoff(self):
query = pd.Series(["bar", "bar", "bar"], dtype="category")
reference = | pd.Series(["bar", "bar", "bar"], dtype="category") | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 2 15:03:42 2019
@author: user
"""
import time
import itertools
import os
import tables
import shutil
from glob import glob
import fastparquet as pq
import numpy as np
import pandas as pd
import psycopg2 as pg
import grimsel
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import grimsel.core.autocomplete as ac
import grimsel.core.table_struct as table_struct
from grimsel import _get_logger
logger = _get_logger(__name__)
FORMAT_RUN_ID = '{:04d}' # modify for > 9999 model runs
class _HDFWriter:
    ''' Mixin class for :class:`CompIO` and :class:`DataReader`. '''
def write_hdf(self, tb, df, put_append):
'''
Opens connection to HDF file and writes output.
Parameters
----------
put_append: str, one of `('append', 'put')`
Write one-time table to the output file (`'put'`) or append
to existing table (`'append'`).
'''
with pd.HDFStore(self.cl_out, mode='a') as store:
method_put_append = getattr(store, put_append)
method_put_append(tb, df, data_columns=True, format='table',
complevel=9, complib='blosc:blosclz')
class _ParqWriter:
    ''' Mixin class for :class:`CompIO` and :class:`DataReader`. '''
def write_parquet(self, fn, df, engine):
'''
        Writes the output table to a parquet file.
Parameters
----------
fn: str
filename for table writing
df: pandas DataFrame
table to be written
engine: str
engine name as in the pandas DataFrame.to_parquet parameter
'''
if self.output_target == 'fastparquet':
pq.write(fn, df, append=os.path.isfile(fn), compression='GZIP')
# df.to_parquet(fn, engine='fastparquet',
# compression='gzip',)
# if 'run_id' in df.columns:
# df.to_parquet(fn, #append=os.path.isfile(fn),
# engine='fastparquet',
# compression='gzip',
# partition_cols=['run_id'])
# else:
# df.to_parquet(fn, #append=os.path.isfile(fn),
# engine='fastparquet',
# compression='gzip'
# )
else:
raise RuntimeError('Writing using parquet engine %s '
'not implemented.'%self.output_target)
class CompIO(_HDFWriter, _ParqWriter):
'''
A CompIO instance takes care of extracting a single variable/parameter from
the model and of writing a single table to the database.
'''
def __init__(self, tb, cl_out, comp_obj, idx, connect, output_target,
model=None):
self.tb = tb
self.cl_out = cl_out
self.comp_obj = comp_obj
self.output_target = output_target
self.connect = connect
self.model = model
self.columns = None # set in index setter
self.run_id = None # set in call to self.write_run
self.index = tuple(idx) if not isinstance(idx, tuple) else idx
self.coldict = aql.get_coldict()
def post_processing(self, df):
''' Child-specific method called after reading. '''
return df
def to_df(self):
'''
Calls classmethods _to_df.
Is overwritten in DualIO, where _to_df is not used as classmethod.
'''
return self._to_df(self.comp_obj,
[c for c in self.index if not c == 'bool_out'])
def init_output_table(self):
'''
Initialization of output table.
Calls the :func:`aux_sql_func` method with appropriate parameters.
        .. note::
Keys need to be added in post-processing due to table
writing performance.
'''
logger.info('Generating output table {}'.format(self.tb))
col_names = self.index + ('value',)
cols = [(c,) + (self.coldict[c][0],) for c in col_names]
cols += [('run_id', 'SMALLINT')]
pk = [] # pk added later for writing/appending performance
unique = []
aql.init_table(tb_name=self.tb, cols=cols,
schema=self.cl_out,
ref_schema=self.cl_out, pk=pk,
unique=unique, bool_auto_fk=False, db=self.connect.db,
con_cur=self.connect.get_pg_con_cur())
def _to_file(self, df, tb):
'''
        Casts the data types of the output table and writes the
        table to the output file (HDF5 or parquet, depending on output_target).
'''
dtype_dict = {'value': np.dtype('float64'),
'bool_out': np.dtype('bool')}
dtype_dict.update({col: np.dtype('int32') for col in df.columns
if not col in ('value', 'bool_out')})
df = df.astype({col: dtype for col, dtype in dtype_dict.items()
if col in df.columns})
if self.output_target == 'hdf5':
self.write_hdf(tb, df, 'append')
elif self.output_target in ['fastparquet']:
fn = os.path.join(self.cl_out,
tb + ('_%s'%FORMAT_RUN_ID).format(self.run_id) + '.parq')
self.write_parquet(fn, df, engine=self.output_target)
else:
raise RuntimeError('_to_file: no '
'output_target applicable')
def _to_sql(self, df, tb):
df.to_sql(tb, self.connect.get_sqlalchemy_engine(),
schema=self.cl_out, if_exists='append', index=False)
def _finalize(self, df, tb=None):
''' Add run_id column and write to database table '''
tb = self.tb if not tb else tb
logger.info('Writing {} to {}.{}'.format(self.comp_obj.name,
self.cl_out, tb))
# value always positive, directionalities expressed through bool_out
df['value'] = df['value'].abs()
df['run_id'] = self.run_id
t = time.time()
if self.output_target in ['hdf5', 'fastparquet']:
self._to_file(df, tb)
elif self.output_target == 'psql':
self._to_sql(df, tb)
else:
raise RuntimeError('_finalize: no '
'output_target applicable')
logger.info(' ... done in %.3f sec'%(time.time() - t))
@property
def index(self):
return self._index
@index.setter
def index(self, value):
''' Makes sure idx is tuple and updates columns attribute. '''
self._index = (value,) if not isinstance(value, tuple) else value
self.columns = list(self.index + ('value',))
self.columns = [c for c in self.columns if not c == 'bool_out']
def get_df(self):
df = self.to_df()
df = self.post_processing(df)
return df
def write(self, run_id):
self.run_id = run_id
df = self.get_df()
self._finalize(df)
def _node_to_plant(self, pt):
'''
TODO: THIS SHOULD BE IN MAPS!!!!
TODO: THIS SHOULD ALSO INCLUDE ca_id FOR DMND!
Method for translation of node_id to respective plant_id
in the cases of demand and inter-node transmission. This is used to
append demand/inter-nodal transmission to pwr table.
Returns a dictionary node -> plant
Keyword arguments:
* pt -- string, selected plant type for translation
'''
df_np = self.model.df_def_plant[['nd_id', 'pp_id', 'pp', 'pt_id']]
df_pt = self.model.df_def_pp_type[['pt_id', 'pt']]
mask_pt = df_pt.pt.apply(lambda x: x.replace('_ST', '')) == pt
slct_pp_type = df_pt.loc[mask_pt, 'pt_id'].astype(int).iloc[0]
mask_tech = df_np['pt_id'] == slct_pp_type
df_np_slct = df_np.loc[mask_tech]
dict_node_plant_slct = df_np_slct.set_index('nd_id')
dict_node_plant_slct = dict_node_plant_slct['pp_id'].to_dict()
return dict_node_plant_slct
def __repr__(self):
return 'Comp_obj: ' + str(self.comp_obj)
class DualIO(CompIO):
'''
Base class for dual values. Performs the data extraction of constraint
shadow prices.
'''
def to_df(self):
dat = [ico + (self.model.dual[self.comp_obj[ico]],)
for ico in self.comp_obj
if self.comp_obj[ico].active]
return | pd.DataFrame(dat, columns=self.columns) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import (
DataFrame,
NaT,
Series,
Timedelta,
Timestamp,
)
import pandas._testing as tm
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = Timestamp.utcnow()
df = DataFrame({"a": [1, 1], "date": now_dt})
result = df.groupby("a").shift(0).iloc[0]
expected = Series({"date": now_dt}, name=result.name)
tm.assert_series_equal(result, expected)
def test_group_diff_real_series(any_real_numpy_dtype):
df = DataFrame(
{"a": [1, 2, 3, 3, 2], "b": [1, 2, 3, 4, 5]},
dtype=any_real_numpy_dtype,
)
result = df.groupby("a")["b"].diff()
exp_dtype = "float"
if any_real_numpy_dtype in ["int8", "int16", "float32"]:
exp_dtype = "float32"
expected = Series([np.nan, np.nan, np.nan, 1.0, 3.0], dtype=exp_dtype, name="b")
tm.assert_series_equal(result, expected)
def test_group_diff_real_frame(any_real_numpy_dtype):
df = DataFrame(
{
"a": [1, 2, 3, 3, 2],
"b": [1, 2, 3, 4, 5],
"c": [1, 2, 3, 4, 6],
},
dtype=any_real_numpy_dtype,
)
result = df.groupby("a").diff()
exp_dtype = "float"
if any_real_numpy_dtype in ["int8", "int16", "float32"]:
exp_dtype = "float32"
expected = DataFrame(
{
"b": [np.nan, np.nan, np.nan, 1.0, 3.0],
"c": [np.nan, np.nan, np.nan, 1.0, 4.0],
},
dtype=exp_dtype,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[
Timestamp("2013-01-01"),
Timestamp("2013-01-02"),
Timestamp("2013-01-03"),
],
[Timedelta("5 days"), | Timedelta("6 days") | pandas.Timedelta |
import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
_check_plot_works(df.boxplot, return_type="dict")
_check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, column=["one", "two"], by="indic")
_check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by=["indic", "indic2"])
_check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
_check_plot_works(df.boxplot, notch=1, return_type="dict")
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="indic", notch=1)
@pytest.mark.slow
def test_boxplot_legacy2(self):
df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.boxplot, by="X")
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.boxplot("Col1", by="X", ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
fig, ax = self.plt.subplots()
axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
ax_axes = ax.axes
assert ax_axes is axes["A"]
# Multiple columns with an ax argument should use same figure
fig, ax = self.plt.subplots()
with tm.assert_produces_warning(UserWarning):
axes = df.boxplot(
column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
)
assert axes["Col1"].get_figure() is fig
# When by is None, check that all relevant lines are present in the
# dict
fig, ax = self.plt.subplots()
d = df.boxplot(ax=ax, return_type="dict")
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
@pytest.mark.slow
def test_boxplot_return_type_none(self):
# GH 12216; return_type=None & by=None -> axes
result = self.hist_df.boxplot()
assert isinstance(result, self.plt.Axes)
@pytest.mark.slow
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
import matplotlib as mpl # noqa
df = DataFrame(
np.random.randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
with pytest.raises(ValueError):
df.boxplot(return_type="NOTATYPE")
result = df.boxplot()
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="dict")
self._check_box_return_type(result, "dict")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="axes")
self._check_box_return_type(result, "axes")
with tm.assert_produces_warning(False):
result = df.boxplot(return_type="both")
self._check_box_return_type(result, "both")
@pytest.mark.slow
def test_boxplot_axis_limits(self):
def _check_ax_limits(col, ax):
y_min, y_max = ax.get_ylim()
assert y_min <= col.min()
assert y_max >= col.max()
df = self.hist_df.copy()
df["age"] = np.random.randint(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
assert weight_ax._sharey == height_ax
# Two rows, one partial
p = df.boxplot(["height", "weight", "age"], by="category")
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
_check_ax_limits(df["age"], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
@pytest.mark.slow
def test_boxplot_empty_column(self):
df = DataFrame(np.random.randn(20, 4))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
@pytest.mark.slow
def test_figsize(self):
df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result = df.boxplot(return_type="axes", figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
self._check_ticks_props(
df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
)
def test_boxplot_numeric_data(self):
# GH 22799
df = DataFrame(
{
"a": date_range("2012-01-01", periods=100),
"b": np.random.randn(100),
"c": np.random.randn(100) + 2,
"d": date_range("2012-01-01", periods=100).astype(str),
"e": date_range("2012-01-01", periods=100, tz="UTC"),
"f": timedelta_range("1 days", periods=100),
}
)
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
@pytest.mark.parametrize(
"colors_kwd, expected",
[
(
dict(boxes="r", whiskers="b", medians="g", caps="c"),
dict(boxes="r", whiskers="b", medians="g", caps="c"),
),
(dict(boxes="r"), dict(boxes="r")),
("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
],
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
df = DataFrame(random.rand(10, 2))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"dict_colors, msg",
[(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
df = DataFrame(random.rand(10, 2))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@pytest.mark.parametrize(
"props, expected",
[
("boxprops", "boxes"),
("whiskerprops", "whiskers"),
("capprops", "caps"),
("medianprops", "medians"),
],
)
def test_specified_props_kwd(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.random(100) for k in "ABC"})
kwd = {props: dict(color="C1")}
result = df.boxplot(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_boxplot_legacy1(self):
grouped = self.hist_df.groupby(by="gender")
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_boxplot_legacy2(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
grouped = df.groupby(level=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_boxplot_legacy3(self):
tuples = zip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
grouped = df.unstack(level=1).groupby(level=0, axis=1)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(grouped.boxplot, return_type="axes")
self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_grouped_plot_fignums(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(["male", "female"], size=n)
df = DataFrame({"height": height, "weight": weight, "gender": gender})
gb = df.groupby("gender")
res = gb.plot()
assert len(self.plt.get_fignums()) == 2
assert len(res) == 2
tm.close()
res = gb.boxplot(return_type="axes")
assert len(self.plt.get_fignums()) == 1
assert len(res) == 2
tm.close()
# now works with GH 5610 as gender is excluded
res = df.groupby("gender").hist()
tm.close()
@pytest.mark.slow
def test_grouped_box_return_type(self):
df = self.hist_df
# old style: return_type=None
result = df.boxplot(by="gender")
assert isinstance(result, np.ndarray)
self._check_box_return_type(
result, None, expected_keys=["height", "weight", "category"]
)
# now for groupby
result = df.groupby("gender").boxplot(return_type="dict")
self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
columns2 = "X B C D A G Y N Q O".split()
df2 = DataFrame(random.randn(50, 10), columns=columns2)
categories2 = "A B C D E F G H I J".split()
df2["category"] = categories2 * 5
for t in ["dict", "axes", "both"]:
returned = df.groupby("classroom").boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])
returned = df.boxplot(by="classroom", return_type=t)
self._check_box_return_type(
returned, t, expected_keys=["height", "weight", "category"]
)
returned = df2.groupby("category").boxplot(return_type=t)
self._check_box_return_type(returned, t, expected_keys=categories2)
returned = df2.boxplot(by="category", return_type=t)
self._check_box_return_type(returned, t, expected_keys=columns2)
@pytest.mark.slow
def test_grouped_box_layout(self):
df = self.hist_df
msg = "Layout of 1x1 must be larger than required size 2"
with pytest.raises(ValueError, match=msg):
df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
msg = "The 'layout' keyword is not supported when 'by' is None"
with pytest.raises(ValueError, match=msg):
df.boxplot(
column=["height", "weight", "category"],
layout=(2, 1),
return_type="dict",
)
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
# _check_plot_works adds an ax so catch warning. see GH #13188
with | tm.assert_produces_warning(UserWarning) | pandas._testing.assert_produces_warning |
import calendar
import pandas as pd
import hashlib
import json
from profootballref.Parsers import PlayerParser
from profootballref.Tools import Loader
from profootballref.Tools import Passhash
from profootballref.Tools import Rechash
from profootballref.Tools import Rushhash
from profootballref.Tools import Kickhash
from profootballref.Tools import Defhash
pd.set_option('display.max_columns', None)
class GameLog:
def __init__(self):
pass
def common(self, dframe, year):
# Drop rk col
dframe = dframe.drop(['Rk'], axis=1)
# drop summary line
dframe = dframe.loc[pd.notnull(dframe.loc[:, 'G#'])]
# clean home/away
dframe.loc[:, 'Home'].fillna('H', inplace=True)
homemap = {'H': True, '@': False}
dframe.loc[:, 'Home'] = dframe.loc[:, 'Home'].map(homemap)
# map if the player was listed as a starter, this is only used for QBs
if 'GS' in dframe.columns:
dframe.loc[:, 'GS'] = dframe.loc[:, 'GS'].fillna('False')
gsmap = {'*': True, 'False': False}
dframe.loc[:, 'GS'] = dframe.loc[:, 'GS'].map(gsmap)
        # Recalculate age. Pro Football Reference gives Age in years and days, so we'll turn that into a float
if calendar.isleap(year):
days = 366
else:
days = 365
dframe.loc[:, 'Age'] = dframe.loc[:, 'Age'].astype(str)
age = dframe.loc[:, 'Age'].str.split('.', expand=True)
age[0] = pd.to_numeric(age[0])
age[1] = pd.to_numeric(age[1])
dframe.loc[:, 'Age'] = age[1] / days + age[0]
        # Split wins and losses w/ points for (PF) and points against (PA)
rec = dframe.loc[:, 'Result'].str.split(' ', expand=True)
dframe.loc[:, 'Result'] = rec[0]
dframe.loc[:, 'PF'] = pd.to_numeric(rec[1].str.split('-', expand=True)[0])
dframe.loc[:, 'PA'] = pd.to_numeric(rec[1].str.split('-', expand=True)[1])
return dframe
def passing(self, player_link, year, **kwargs):
# Set up the gamelog suffix
gamelog_suffix = '/gamelog/%s/' % year
# Modify the player url to point to the gamelog
log_url = player_link[:-4] + gamelog_suffix
# Get html
html = Loader.Loader().load_page(log_url).content.decode()
        # get general stats
gen = PlayerParser.PlayerParser().parse_general_info(html)
# parse tables w pandas
df = pd.read_html(html)[0]
# drop first level of cols
df.columns = df.columns.droplevel()
# rename the home column
df = df.rename(columns={df.columns[5]: "Home"})
# There may be many extra blank cols, delet them
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
# send df to the common parser
df = self.common(df, year)
# Add the name
df.loc[:, 'Name'] = gen['name']
# Add the players position
df.loc[:, 'Pos'] = gen['position']
# add additional player info
df['Throws'] = gen['throws']
df['Height'] = gen['height']
df['Weight'] = gen['weight']
df['DOB_mo'] = gen['bday_mo']
df['DOB_day'] = gen['bday_day']
df['DOB_yr'] = gen['bday_yr']
df['College'] = gen['college']
return df
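    # Illustrative usage (added, not part of the original module): player_link is assumed to be a
    # Pro Football Reference player page URL ending in '.htm'; the example URL is a placeholder.
    # df_qb = GameLog().passing('https://www.pro-football-reference.com/players/B/BradTo00.htm', 2017)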
def receiving(self, player_link, year, **kwargs):
# Set up the gamelog suffix
gamelog_suffix = '/gamelog/%s/' % year
# Modify the player url to point to the gamelog
log_url = player_link[:-4] + gamelog_suffix
# Get html
html = Loader.Loader().load_page(log_url).content.decode()
# ************** generate general stats, these need to be combined later ******************
gen = PlayerParser.PlayerParser().parse_general_info(html)
# parse tables w pandas
df = pd.read_html(html)[0]
# hash the columns to determine which fields are being used
which_cols = hashlib.md5(json.dumps(list(df.columns.levels[0])).encode()).hexdigest()
# Here we make a dict of hashes and their corresponding column parser, this is faster than if/else
options = { "b3c4237d9a10de8cfaad61852cb552c4": Rechash.RecHash().md5b3c4237d9a10de8cfaad61852cb552c4,
"bcb96297b50fb2120f475e8e05fbabcd": Rechash.RecHash().md5bcb96297b50fb2120f475e8e05fbabcd,
"4560c290b45e942c16cc6d7811345fce": Rechash.RecHash().md54560c290b45e942c16cc6d7811345fce,
"4c82a489ec5b2c943e78c9018dcbbca1": Rechash.RecHash().md54c82a489ec5b2c943e78c9018dcbbca1,
"e8ffc7202223bb253e92da83b76e9944": Rechash.RecHash().md5e8ffc7202223bb253e92da83b76e9944,
"50fcceaa170b1a1e501e3f40548e403d": Rechash.RecHash().md550fcceaa170b1a1e501e3f40548e403d,
"e160e714b29305ecfecf513cbf84b80f": Rechash.RecHash().md5e160e714b29305ecfecf513cbf84b80f,
"111e8480632f73642d7e20acbdbe6b16": Rechash.RecHash().md5111e8480632f73642d7e20acbdbe6b16,
"adc05c5af0f88775d3605d02c831c0ed": Rechash.RecHash().md5adc05c5af0f88775d3605d02c831c0ed,
"bfbf86ae0485a0a70692ae04124449b9": Rechash.RecHash().md5bfbf86ae0485a0a70692ae04124449b9,
"6b4698269dd34a823cf6b233c6165614": Rechash.RecHash().md56b4698269dd34a823cf6b233c6165614,
"7f97f3885d50fcf9b92797810856a89f": Rechash.RecHash().md57f97f3885d50fcf9b92797810856a89f,
"aa321161d6f3f5230259dbc4ae67299a": Rechash.RecHash().md5aa321161d6f3f5230259dbc4ae67299a,
"1193d47266d4acdcf1b6fca165121100": Rechash.RecHash().md51193d47266d4acdcf1b6fca165121100,
"52589e869a13d76c6d0dbf066cab536f": Rechash.RecHash().md552589e869a13d76c6d0dbf066cab536f,
"d522b9357244c20714a3b21f8f404918": Rechash.RecHash().md5d522b9357244c20714a3b21f8f404918}
df = options[which_cols](df)
# send df to the common parser
df = self.common(df, year)
# Add the name
df.loc[:, 'Name'] = gen['name']
# Add the players position
df.loc[:, 'Pos'] = gen['position']
df['Throws'] = gen['throws']
df['Height'] = gen['height']
df['Weight'] = gen['weight']
df['DOB_mo'] = gen['bday_mo']
df['DOB_day'] = gen['bday_day']
df['DOB_yr'] = gen['bday_yr']
df['College'] = gen['college']
df = df[['Name', 'Pos', 'Height', 'Weight', 'DOB_mo', 'DOB_day', 'DOB_yr', 'College'] +
Rechash.RecHash().base[1:] + ['PF', 'PA'] + Rechash.RecHash().receiving + Rechash.RecHash().rushing +
Rechash.RecHash().kick_rt + Rechash.RecHash().punt_rt + Rechash.RecHash().scoring2p +
Rechash.RecHash().scoring]
return df
def rushing(self, player_link, year, **kwargs):
# Set up the gamelog suffix
gamelog_suffix = '/gamelog/%s/' % year
# Modify the player url to point to the gamelog
log_url = player_link[:-4] + gamelog_suffix
# Get html
html = Loader.Loader().load_page(log_url).content.decode()
# ************** generate general stats, these need to be combined later ******************
gen = PlayerParser.PlayerParser().parse_general_info(html)
# parse tables w pandas
df = | pd.read_html(html) | pandas.read_html |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from tqdm import tqdm as pb
import datetime
import re
import warnings
import matplotlib.pyplot as plt
import pylab as mpl
from docx import Document
from docx.shared import Pt
from data_source import local_source
def concat_ts_codes(df): #concatenate all TS_CODE values in df into a SQL condition string
result = ''
for code in df["TS_CODE"]:
result = result + 'TS_CODE = "' + code + '" or '
result = result[:-4]
return result
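#illustrative example (added): for a df whose TS_CODE column holds ["000001.SZ", "600000.SH"],
#concat_ts_codes returns 'TS_CODE = "000001.SZ" or TS_CODE = "600000.SH"'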
def drop_duplicates_keep_nonnan(df,subset): #keep the row with the fewest NaNs in each duplicate group; currently unused
warnings.filterwarnings("ignore")
subset_values = []
df_result = pd.DataFrame(columns=df.columns)
for i in range(len(df)):
subset_value = list(df[subset].iloc[i,:])
if subset_value not in subset_values: subset_values.append(subset_value)
for subset_value in subset_values:
df_sub = df[(df[subset]==subset_value).product(axis=1)==1]
df_sub["nan_count"] = 0
df_sub.loc[:,"nan_count"] = df_sub.isnull().sum(axis=1)
df_sub.sort_values(by='nan_count',ascending=True, inplace=True)
df_sub = pd.DataFrame(df_sub.iloc[0,:]).T
df_result = pd.concat([df_result, df_sub],axis=0)
warnings.filterwarnings("default")
return df_result
#tester = pd.DataFrame([[1,1,5,5,5],[1,1,5,np.nan,np.nan],[2,2,5,5,5],[2,2,np.nan,5,5],[2,1,np.nan,np.nan,np.nan]],columns=['a','b','c','d','e'])
#tester2 = drop_duplicates_keep_nonnan(df=tester, subset=['a','b'])
def Find_Comparibles(ts_code, df_ind): #select comparable companies by total market cap difference (could be improved); input df needs END_DATE, INDUSTRY and TOTAL_MV columns
stocks_used = df_ind.copy()
stocks_used["END_DATE"] = stocks_used["END_DATE"].astype(int)
last_end_date = max(stocks_used["END_DATE"])
stocks_used = stocks_used[stocks_used["END_DATE"]==last_end_date]
stocks_used["TOTAL_MV_diff"] = abs( stocks_used["TOTAL_MV"] - stocks_used.loc[stocks_used["TS_CODE"]==ts_code, "TOTAL_MV"].iloc[0] )
stocks_used.sort_values(by="TOTAL_MV_diff", ascending=True,inplace=True)
stocks_used = stocks_used[1:3]
return list(stocks_used["TS_CODE"])
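#illustrative usage (added): pick the two peers closest in total market cap; stocks_ind is the
#industry-wide DataFrame assembled in FSA_Initializer below
#peers = Find_Comparibles("000001.SZ", stocks_ind)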
def RatioComparation_Plotter(df,var_name, year=5):
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1,1,1)
ax.set_title("{var_name}趋势图".format(var_name=var_name))
ax.set_xlabel("年份",labelpad=0, position=(0.5,1))
ax.set_ylabel("{var_name}".format(var_name=var_name),labelpad=0, position=(1,0.5))
for stock in df["TS_CODE"].unique():
x = df.loc[df["TS_CODE"]==stock,"END_DATE_year"].iloc[(-1*year):]
y = df.loc[df["TS_CODE"]==stock, var_name].iloc[(-1*year):]
ax.plot(x,y,linewidth='1',label="{stock}".format(stock=stock))
ax.legend(loc="upper right",bbox_to_anchor=(1.4,1),shadow=True)
plt.show()
def FSA_Initializer(ts_code):
basic = local_source.get_stock_list(condition='TS_CODE = '+'"'+ts_code+'"')
ind = basic["INDUSTRY"].iloc[0]
stocks_ind = local_source.get_stock_list(condition='INDUSTRY = '+'"'+ind+'"')
ts_codes_ind = concat_ts_codes(stocks_ind)
quotations_monthly_ind = local_source.get_quotations_monthly(cols='TRADE_DATE,TS_CODE,CLOSE',condition=ts_codes_ind).sort_values(by="TRADE_DATE", ascending=True)
quotations_monthly_ind.rename(columns={'TRADE_DATE':'END_DATE'}, inplace = True)
stock_indicators_daily_ind = local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition=ts_codes_ind).sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_ind.rename(columns={'TRADE_DATE':'END_DATE'}, inplace = True)
financial_indicators_ind = local_source.get_financial_indicators(condition=ts_codes_ind).sort_values(by="END_DATE", ascending=True)
stocks_ind = pd.merge(financial_indicators_ind,stocks_ind, on=['TS_CODE'], how="left")
stocks_ind = | pd.merge(stocks_ind, quotations_monthly_ind, on=['TS_CODE','END_DATE'], how="left") | pandas.merge |
"""
Utilities that help with the building of tensorflow keras models
"""
import io
from muti import chu, genu
import tensorflow as tf
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.io as pio
from plotly.subplots import make_subplots
import warnings
import os
import math
import multiprocessing
def polynomial_decay_learning_rate(step: int, learning_rate_start: float, learning_rate_final: float,
decay_steps: int, power: float):
"""
Manual implementation of polynomial decay for learning rate
:param step: which step we're on
:param learning_rate_start: learning rate for epoch 0
:param learning_rate_final: learning rate for epoch decay_steps
:param decay_steps: epoch at which learning rate stops changing
:param power: exponent
    :return: learning rate for this step
"""
if step <= decay_steps:
delta = float(learning_rate_start - learning_rate_final)
lr = delta * (1.0 - float(step) / float(decay_steps)) ** power + learning_rate_final
return lr
return learning_rate_final
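def polynomial_decay_callback(learning_rate_start=1e-2, learning_rate_final=1e-4,
                              decay_steps=50, power=1.0):
    """
    Illustrative helper (added, not part of the original module): wraps the manual polynomial
    decay above in a tf.keras LearningRateScheduler callback so it can be passed to model.fit.
    The default hyperparameters are placeholders, not recommendations from this codebase.
    """
    return tf.keras.callbacks.LearningRateScheduler(
        lambda epoch: polynomial_decay_learning_rate(epoch, learning_rate_start,
                                                     learning_rate_final, decay_steps, power))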
def get_pred(yh, column=None, wts=None):
"""
Returns an array of predicted values from a keras predict method. If column is None, then this
assumes the output has one column and it returns a flattened array.
If column is an int, it returns that column from the prediction matrix.
If column is a list of int, it returns the column sums
:param yh: keras model prediction
:param column: which column(s) to return, int or list of int
:param wts: array of weights. if yh is n x p, wts has length p. nd.array if specified
:return: prediction array
:rtype nd.array
"""
if wts is not None:
yh = yh * wts
if column is None:
return np.array(yh).flatten()
if not isinstance(column, list):
return yh[:, column]
# sum up columns
return np.sum(yh[:, column], axis=1)
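# Worked example (added, not part of the original module): for a 3-column softmax output
#   yh = np.array([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]])
# get_pred(yh) flattens to a 1-d array, get_pred(yh, 1) returns array([0.2, 0.3]), and
# get_pred(yh, [1, 2]) returns the row-wise column sums array([0.3, 0.9]).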
def model_predictions(df: pd.DataFrame, specs: list, in_place = True, log_odds=False):
"""
find the predicted values for a keras model
    :param df: data frame to run the model over
    :param specs: specifications of the model. list elements
[0] - location
[1] - features_dict
[2] - target of model
[3] - column(s)
[4] - output name
:param log_odds: if true, take log-odds of result
:return:
"""
modl = tf.keras.models.load_model(specs[0])
ds = get_tf_dataset(specs[1], specs[2], df, 1000, 1)
yh = get_pred(modl.predict(ds), specs[3])
if log_odds:
i = yh == 1.0
yh[i] = .999999
i = yh == 0.0
yh[i] = 0.000001
yh = np.log(yh / (1.0 - yh))
if in_place:
df[specs[4]] = yh
return
else:
return yh
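# Illustrative usage (added, not part of the original module): 'specs' follows the layout in the
# docstring above; the model path, feature dict and column names below are placeholders.
# specs = ['/tmp/my_model.h5', features_dict, 'target', 0, 'yhat']
# model_predictions(score_df, specs, in_place=True, log_odds=False)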
def plot_history(history: dict, groups=['loss'], metric='loss', first_epoch=0, title=None, plot_dir=None, in_browser=False):
"""
plot the history of metrics from a keras model tf build
:param history: history returned from keras fit
:param groups: groups to plot
:param metric: metric to plot
:param first_epoch: first element to plot
:param title: title for plot
:param plot_dir: directory to plot to
:param in_browser: if True display in browser
:return:
"""
fig = []
for g in groups:
x = np.arange(first_epoch, len(history[g]) - first_epoch)
y = history[g][first_epoch:len(history[metric])]
fig += [go.Scatter(x=x, y=y, name=g)]
if title is None:
title = 'TensorFlow Model Build<br>' + metric
layout = go.Layout(title=title,
xaxis=dict(title='Epoch'),
yaxis=dict(title=metric))
figx = go.Figure(fig, layout=layout)
if in_browser:
pio.renderers.default = 'browser'
figx.show()
if plot_dir is not None:
os.makedirs(plot_dir, exist_ok=True)
plot_file = plot_dir + metric + '.png'
figx.write_image(plot_file)
plot_file = plot_dir + metric + '.html'
figx.write_html(plot_file)
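# Illustrative usage (added, not part of the original module):
# hist = model.fit(train_ds, epochs=20, validation_data=valid_ds)
# plot_history(hist.history, groups=['loss', 'val_loss'], metric='loss', in_browser=True)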
def build_column(feature_name: str, feature_params: list, out_path=None, print_details=True):
"""
Returns a tensorflow feature columns and, optionally, the vocabulary for categorical and
embedded features. Optionally creates files of the vocabularies for use in TensorBoard.
:param feature_name: name of the feature
:param feature_params:
Element 0: type of feature ('cts'/'spl', 'cat', 'emb').
Element 1: ('cat', 'emb') vocabulary list (list of levels)
Element 2: ('cat', 'emb') default index. If None, 0 is used
Element 3: ('emb') embedding dimension
:param out_path: path to write files containing levels of 'cat' and 'emb' variables
:param print_details: print info about each feature
:return: tf feature column and (for 'cat' and 'emb') a list of levels (vocabulary)
"""
if feature_params[0] == 'cts' or feature_params[0] == 'spl':
if print_details:
print('col {0} is numeric'.format(feature_name))
return tf.feature_column.numeric_column(feature_name)
# categorical and embedded features
if feature_params[0] in ['cat', 'emb']:
vocab = feature_params[1]
# save vocabulary for TensorBoard
if out_path is not None:
if out_path[-1] != '/':
out_path += '/'
if not os.path.isdir(out_path):
os.makedirs(out_path)
f = open(out_path + feature_name + '.txt', 'w')
f.write('label\tId\n')
for j, s in enumerate(vocab):
f.write(str(s) + '\t' + str(j) + '\n')
f.close()
dv = [j for j in range(len(vocab)) if vocab[j] == feature_params[2]][0]
col_cat = tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocab,
default_value=dv)
# go with 1-hot encoding
if feature_params[0] == 'cat':
col_ind = tf.feature_column.indicator_column(col_cat)
if print_details:
print('col {0} is categorical with {1} levels'.format(feature_name, len(vocab)))
return col_ind
# for embedded features, the third element of feature_params input is the dimension of the
# embedding
levels = feature_params[3]
col_emb = tf.feature_column.embedding_column(col_cat, levels)
if print_details:
print('col {0} is embedded with {1} levels'.format(feature_name, levels))
return col_emb
def build_model_cols(feature_dict: dict, out_vocab_dir=None, print_details=True):
"""
Builds inputs needed to specify a tf.keras.Model. The tf_cols_* are TensorFlow feature_columns. The
inputs_* are dictionaries of tf.keras.Inputs. The tf_cols_* are used to specify keras.DenseFeatures methods and
the inputs_* are the inputs to those layers.
:param feature_dict: dictionary of features to build columns for. The key is the feature name. The entry is a list:
feature type (str) 'cts'/'spl', 'cat', 'emb'
list of unique levels for 'cat' and 'emb'
embedding dimension for 'emb'
:param out_vocab_dir: directory to write out unique levels
:return: 4 lists:
- tf_cols_cts: tf.feature_column defining each continuous feature
- inputs_cts: list of tf.keras.Inputs for each continuous column
- tf_cols_cat: tf.feature_column defining each categorical ('cat','emb') feature
- inputs_cat: list of tf.keras.Inputs for each categorical ('cat', 'emb') column
The tf_cols_* are used in tf.keras.layers.DenseFeatures
the inputs_* are used to define the inputs to those tensors
"""
tf_cols_cts = []
tf_cols_cat = []
inputs_cts = {}
inputs_cat = {}
for feature in feature_dict.keys():
if feature_dict[feature][0] == 'cts' or feature_dict[feature][0] == 'spl':
feat = build_column(feature, feature_dict[feature], print_details=print_details)
tf_cols_cts += [feat]
inputs_cts[feature] = tf.keras.Input(shape=(1,), name=feature)
else:
feat = build_column(feature, feature_dict[feature], out_vocab_dir, print_details=print_details)
tf_cols_cat += [feat]
inputs_cat[feature] = tf.keras.Input(shape=(1,), name=feature, dtype=tf.string)
return tf_cols_cts, inputs_cts, tf_cols_cat, inputs_cat
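def example_model_from_columns(feature_dict: dict):
    """
    Illustrative sketch (added, not part of the original module): assemble a small functional
    model from the outputs of build_model_cols. feature_dict is assumed to follow the format
    documented above; the layer sizes are placeholders, not recommendations.
    """
    cols_cts, in_cts, cols_cat, in_cat = build_model_cols(feature_dict, print_details=False)
    inputs = {**in_cts, **in_cat}
    # DenseFeatures consumes the numeric, indicator and embedding columns built above
    dense = tf.keras.layers.DenseFeatures(cols_cts + cols_cat)(inputs)
    x = tf.keras.layers.Dense(16, activation='relu')(dense)
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    return tf.keras.Model(inputs=inputs, outputs=out)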
def get_tf_dataset(feature_dict: dict, target: str, df: pd.DataFrame, batch_size: int, repeats=0):
"""
build a tf dataset from a pandas DataFrame
:param feature_dict: dictionary whose keys are the features
:param target: target var
:param df: pandas DataFrame to work on
:param batch_size: Batch size
:param repeats: how many repeats of the dataset (None = infinite)
:return: tf dataset
"""
buffer_size = df.shape[0]
tf_ds = tf.data.Dataset.from_tensor_slices((dict(df[feature_dict.keys()]), df[target]))
# tf_ds = tf_ds.batch(batch_size, drop_remainder=True, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE).repeat().prefetch(buffer_size)
if repeats == 0:
tf_ds = tf_ds.shuffle(reshuffle_each_iteration=True, buffer_size=buffer_size)
tf_ds = tf_ds.batch(batch_size, drop_remainder=True, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE)
tf_ds = tf_ds.prefetch(buffer_size=buffer_size)
tf_ds = tf_ds.cache()
else:
tf_ds = tf_ds.batch(batch_size, deterministic=False, num_parallel_calls=tf.data.AUTOTUNE).repeat(repeats).prefetch(buffer_size)
return tf_ds
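# Illustrative usage (added, not part of the original module): build a toy training dataset;
# 'x1' is a continuous feature and 'target' the label, following the module's feature codes.
# feats = {'x1': ['cts']}
# df_toy = pd.DataFrame({'x1': np.random.randn(256), 'target': np.random.randint(0, 2, 256)})
# train_ds = get_tf_dataset(feats, 'target', df_toy, batch_size=32)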
def incr_build(model, by_var, start_list, add_list, get_data_fn, sample_size, feature_dict, target_var,
batch_size, epochs_list, global_valid_df_in,
model_dir=None, plot=False, verbose=0, output_size = 1, **kwargs):
"""
This function builds a sequence of models. The get_data_fn takes a list of values as contained in
start_list and add_list and returns data subset to those values. The initial model is built on the
values of start_list and then evaluated on the data subset to the first value of add_list.
At the next step, the data in the first element of add_list is added to the start_list data, the model
is updated and the evaluation is conducted on the second element of add_list
:param model: input model structure
:type model: tf keras model
:param start_list: list of (general) time periods for model build for the first model build
:type start_list: list
:param add_list: list of out-of-time periods to evaluate
:type add_list: list
:param get_data_fn: function to get a pandas DataFrame of data to work on
:type get_data_fn: function
:param sample_size: size of pandas DataFrames to get
:type sample_size: int
:param feature_dict: dictionary of features in the model
:type feature_dict: dict
:param target_var: target variable of model build
:type target_var: str
:param batch_size: size of batches for model build
:type batch_size: int
:param epochs_list: list (length 2) of epochs for model fit; entry 0 is initial model, entry 1 is subsequent
models
:type epochs_list: list
:param global_valid_df_in: DataFrame that includes all the segments in add_list -- for validation
:type global_valid_df_in: pandas DataFrame
:param model_dir: directory to save models
:type model_dir: str
:param plot: if True, plot history
:type plot: bool
    :param verbose: print verbosity for keras.fit (0 = quiet, 1 = normal level, 2 = talkative)
    :type verbose: int
:param output_size: the number of columns returned by keras model predict
:type output_size: int
:return: lists of out-of-sample values:
add_list
rmse root mean squared error
corr correlation
"""
if model_dir is not None:
if model_dir[-1] != '/':
model_dir += '/'
if os.path.isdir(model_dir):
os.system('rm -r ' + model_dir)
os.makedirs(model_dir)
build_list = start_list
epochs = epochs_list[0]
segs = []
global_valid_df = global_valid_df_in.copy()
# validation data
if output_size == 1:
global_valid_df['model_dnn_inc'] = np.full((global_valid_df.shape[0]), 0.0)
else:
for c in range(output_size):
global_valid_df['model_dnn_inc' + str(c)] = np.full((global_valid_df.shape[0]), 0.0)
global_valid_ds = get_tf_dataset(feature_dict, target_var, global_valid_df, 10000, 1)
for j, valid in enumerate(add_list):
segs += [valid]
model_df = get_data_fn(build_list, sample_size, **kwargs)
steps_per_epoch = int(model_df.shape[0] / batch_size)
model_ds = get_tf_dataset(feature_dict, target_var, model_df, batch_size=batch_size)
valid_df = get_data_fn([valid], sample_size, **kwargs)
valid_ds = get_tf_dataset(feature_dict, target_var, valid_df, batch_size=batch_size, repeats=1)
print('Data sizes for out-of-sample value {0}: build {1}, validate {2}'.format(valid, model_df.shape[0],
valid_df.shape[0]))
history = model.fit(model_ds, epochs=epochs, steps_per_epoch=steps_per_epoch,
validation_data=valid_ds, verbose=verbose)
gyh = model.predict(global_valid_ds)
i = global_valid_df[by_var] == valid
if output_size == 1:
global_valid_df.loc[i, 'model_dnn_inc'] = gyh[i]
else:
for c in range(output_size):
global_valid_df.loc[i, 'model_dnn_inc' + str(c)] = gyh[i][:,c]
build_list += [valid] # NOTE Accumulates
# build_list = [valid] # NOTE Accumulates NOT
if model_dir is not None:
out_m = model_dir + "before_" + valid + '.h5'
model.save(out_m, overwrite=True, save_format='h5')
if plot:
title = 'model loss\n' + 'Training up to ' + valid
plot_history(history, ['loss', 'val_loss'], 'loss', title=title)
epochs = epochs_list[1]
return segs, global_valid_df
def _marginal_cts(model: tf.keras.Model, column, features_dict: dict, sample_df: pd.DataFrame,
target: str, num_grp: int, num_sample: int, title: str,
                  sub_titles: list, cols: list):
"""
Build a Marginal Effects plot for a continuous feature
:param model: model
:param column: column(s) of model output, either an int or list of ints
:param features_dict: features in the model
:param sample_df: DataFrame operating on
:param target: target feature
:param num_grp: # of groups model output is sliced into
:param num_sample: # of obs to take from sample_df to build graph
:param title: title for graph
:param sub_titles: titles for subplots
:param cols: colors to use: list of str
:return: plotly_fig and importance metric
"""
sub_titles[6] = 'Box Plots'
# 't' is top spacing, 'b' is bottom, 'None' means there is no graph in that cell. We make
# 2 x 7 -- eliminating the (2,7) graph and putting the RHS graph in the (1,7) position
fig = make_subplots(rows=2, cols=num_grp + 1, subplot_titles=sub_titles,
row_heights=[1, .5],
specs=[[{'t': 0.07, 'b': -.1}, {'t': 0.07, 'b': -.10}, {'t': 0.07, 'b': -.10},
{'t': 0.07, 'b': -.10}, {'t': 0.07, 'b': -.10}, {'t': 0.07, 'b': -.10},
{'t': 0.35, 'b': -0.35}],
[{'t': -0.07}, {'t': -.07}, {'t': -.07}, {'t': -0.07}, {'t': -.07},
{'t': -.07}, None]])
# start with top row graphs
# find ranges by MOG and merge
lows = sample_df.groupby('grp')[target].quantile(.01)
highs = sample_df.groupby('grp')[target].quantile(.99)
both = pd.merge(left=lows, right=highs, left_index=True, right_index=True)
both.rename(columns={target + '_x': 'low', target + '_y': 'high'}, inplace=True)
    # repeat these to accommodate the range of the feature we're going to build next
to_join = pd.concat([both] * 11).sort_index()
# range of the feature
xval = np.arange(11) / 10
xval = np.concatenate([xval] * num_grp)
to_join['steps'] = xval
to_join[target] = to_join['low'] + (to_join['high'] - to_join['low']) * to_join['steps']
# now sample the DataFrame
samps = sample_df.groupby('grp').sample(num_sample, replace=True)
samp_num = pd.Series(np.arange(samps.shape[0]))
samps.index = samp_num
samp_num.name = 'samp_num'
samps = | pd.concat([samps, samp_num], axis=1) | pandas.concat |
########################################
# CALIBRATION DETECTION AND CORRECTION #
########################################
# This file includes functionality for identification and correction of calibration events.
# Functions include detection based on edges or persistence restricted by day of week and hour of day, identification
# of gap values as input to correction, and linear drift correction.
from pyhydroqc import anomaly_utilities
import pandas as pd
import numpy as np
def calib_edge_detect(observed, width, calib_params, threshold=float("nan"), num_events=1, alpha=float("nan")):
"""
calib_edge_detect seeks to find likely calibration event candidates by using edge filtering
Arguments:
observed: time series of observations
width: the width of the edge detection filter
        calib_params: parameters defined in the parameters file; expected to include
            hour_low: earliest hour for calibrations to have occurred
            hour_high: latest hour for calibrations to have occurred
threshold: used for determining candidates from edge filter results
num_events: the number of calibration event candidates to return
alpha: used for determining a threshold from the data
Returns:
candidates: datetimes of the most likely calibration event candidates
edge_diff: differences indicating degree of edges
"""
# TODO: add functionality for num_events and alpha
candidates = []
edge_diff = pd.DataFrame(index=observed.index) # diff['val'] is the filter output
edge_diff['val'] = 0
for i in range(width, len(observed) - width): # loop over every possible difference calculation
# implement the edge detection filter - difference of the sums of before and after data
edge_diff.iloc[i] = (sum(observed[i - width:i]) - sum(observed[i:i + width])) / width
if not np.isnan(threshold): # if the function is being called with a threshold
# iterate over each day, this assumes that a sensor will not be calibrated twice in one day
for idx, day in edge_diff.groupby(edge_diff.index.date):
if max(abs((day['val']))) > threshold: # if any value is above the threshold in that day
candidates.append(pd.to_datetime(day.idxmax()['val'])) # add it to the list of calibration candidates
# specify that calibrations would only occur on work days and between specified hours of the day
candidates = np.array(candidates)
candidates = candidates[( | pd.to_datetime(candidates) | pandas.to_datetime |
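

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): run calib_edge_detect on a
    # synthetic 15-minute series containing one artificial shift. The width, threshold,
    # and calib_params values below are illustrative assumptions only.
    demo_index = pd.date_range("2021-01-04 08:00", periods=200, freq="15min")
    demo_vals = np.concatenate([np.full(20, 12.0), np.full(180, 10.0)])  # downward shift at 13:00
    demo_series = pd.Series(demo_vals, index=demo_index)
    demo_candidates, demo_diff = calib_edge_detect(demo_series, width=4,
                                                   calib_params={"hour_low": 7, "hour_high": 17},
                                                   threshold=0.5)
    print(demo_candidates)  # expected: the timestamp of the artificial shift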
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu June 7 22:33:07 2019
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
from scipy import stats
import matplotlib.pyplot as plt
import os
# set saving path
path_result_freq = "/home/bruce/Dropbox/Project/5.Result/5.Result_Nov/2.freq_domain/"
def correlation_matrix(corr_mx, cm_title):
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels = ['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels = ['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
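    # Each row of 'output' is True only where that test recording (row) attains its maximum
    # correlation, i.e. it marks the best-matching retest recording for each subject.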
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='binary')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
# fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_min_01_comb(corr_mx1 ,corr_mx2, cm_title1, cm_title2):
# find the minimum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx1)
output1 = (temp == temp.min(axis=1)[:,None]) # along rows
temp = np.asarray(corr_mx2)
output2 = (temp == temp.min(axis=1)[:,None]) # along rows
fig, (ax1, ax2) = plt.subplots(1, 2)
# figure 1
im1 = ax1.matshow(output1, cmap='binary')
#fig.colorbar(im1, ax1)
ax1.grid(False)
ax1.set_title(cm_title1)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# figure 2
im2 = ax2.matshow(output2, cmap='binary')
#fig.colorbar(im2, ax2)
ax2.grid(False)
ax2.set_title(cm_title2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels, fontsize=6)
ax1.set_yticklabels(ylabels, fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# eg: plot_mag_db(df_as_85_vsc, 1, "Subject")
def fig_mag_db(signal_in, subject_number = 'subject_number', title = 'title', filename = 'filename'):
plt.figure()
plt.subplot(2,1,1)
plt.plot(signal_in.iloc[2*(subject_number-1), :48030], '-')
plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030], '-')
plt.ylabel('magnitude')
plt.legend(('Retest', 'Test'), loc='upper right')
plt.title(title)
# plt.subplot(2,1,2)
# plt.plot(signal_in.iloc[2*(subject_number-1), :48030].apply(f_dB), '-')
# plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030].apply(f_dB), '-')
# plt.xlabel('Frequency(Hz)')
# plt.ylabel('dB')
# plt.xlim(0,10000)
# plt.legend(('Retest', 'Test'), loc='lower right')
plt.show()
plt.savefig(filename)
# plot time domain signal in one figure
def fig_time_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 1024, 1)
plt.plot(x_label, signal_in.iloc[2*i, :1024], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :1024], '-')
plt.ylabel(sub_title[i])
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_sameday_time_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 1024, 1)
plt.plot(x_label, signal_in.iloc[2*i, :1024], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :1024], '-')
pcc, p_value = stats.pearsonr(signal_in.iloc[2*i, :1024], signal_in.iloc[2*i+1, :1024])
if pcc > 0.1:
plt.ylabel(sub_title[i] + "\n%.3f" % pcc)
print (sub_title[i] + " %.3f" % pcc)
else:
plt.ylabel(sub_title[i] + "\n%.3f" % pcc, color='r')
#plt.text(1000, -0.2, "PCC is %.3f" % pcc, ha='right', va='top')
plt.legend(('no.1', 'no.2'), loc='upper right', fontsize='xx-small')
# print ("subject number:" , sub_title[i] , " PCC:" , pcc)
plt.suptitle(title) # add a centered title to the figure
plt.show()
# plot frequency domain signal in one figure
def fig_mag_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1300)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_sameday_mag_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-')
pcc, p_value = stats.pearsonr(signal_in.iloc[2*i, :13000], signal_in.iloc[2*i+1, :13000])
if pcc >0.5:
plt.ylabel(sub_title[i]+ "\n%.3f" % pcc)
print (sub_title[i]+ " %.3f" % pcc)
else:
plt.ylabel(sub_title[i]+ "\n%.3f" % pcc, color='r')
plt.xlim(0,1300)
# plt.ylim(top=100)
plt.legend(('no.1', 'no.2'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def snr_peak_ratio (signal_in, num_peak=7):
    # input -> amplitude spectrum
    # divide the energy within each (peak ±15 Hz) window by the energy in the rest of the signal
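    # Bin arithmetic below assumes the spectrum is sampled at 0.1 Hz per bin (as in the
    # plotting code above): each block of 1000 bins spans 100 Hz, so peaks are expected
    # every 100 Hz and the +/-150-bin window around each peak corresponds to +/-15 Hz.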
signal_in = np.asarray(signal_in)
snr_numerator= 0
snr_denominator= 0
for j in range (num_peak):
snr_numerator = snr_numerator + (signal_in[1000*j+850:1000*j+1150]**2).sum()
snr_denominator = snr_denominator + (signal_in[1000*j+150:1000*j+850]**2).sum()
snr_numerator = snr_numerator - (signal_in[(1000*num_peak):(1000*num_peak+150)]**2).sum()
snr_numerator = snr_numerator/(300*(num_peak-1)+150)
snr_denominator = snr_denominator + (signal_in[0:150]**2).sum()
snr_denominator = snr_denominator/(700*(num_peak-1)+850)
snr_ratio = snr_numerator/snr_denominator
return snr_ratio
def fig_sameday_as_SNR_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
snr_ratio_1 = snr_peak_ratio(signal_in.iloc[2*i, :48030], num_peak=7)
snr_ratio_2 = snr_peak_ratio(signal_in.iloc[2*i+1, :48030], num_peak=7)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-', label='no.1 SNR: %.4f' % snr_ratio_1)
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-', label='no.2 SNR: %.4f' % snr_ratio_2)
# select the peak with 30Hz width
        if snr_ratio_1 > 9.7253 or snr_ratio_2 > 9.7253:
plt.ylabel(sub_title[i])
print (sub_title[i])
else:
plt.ylabel(sub_title[i], color='r')
plt.xlim(0,1300)
# plt.ylim(top=100)
#plt.legend(('no.1 %.4f', 'no.2%.4f') %(snr_ratio_1, snr_ratio_2), loc='upper right', fontsize='xx-small')
plt.legend(loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_sameday_as_SNR_in_1_avg_8(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
snr_sum=[]
for j in range(8):
snr_ratio_1 = snr_peak_ratio(signal_in.iloc[44*j+2*i, :48030], num_peak=7)
snr_ratio_2 = snr_peak_ratio(signal_in.iloc[44*j+2*i+1, :48030], num_peak=7)
snr_sum.append(snr_ratio_1)
snr_sum.append(snr_ratio_2)
snr_avg = sum(snr_sum)/len(snr_sum)
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
snr_ratio_1 = snr_peak_ratio(signal_in.iloc[2*i, :48030], num_peak=7)
snr_ratio_2 = snr_peak_ratio(signal_in.iloc[2*i+1, :48030], num_peak=7)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-', label='no.1 SNR: %.4f' % snr_ratio_1)
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-', label='no.2 SNR: %.4f' % snr_ratio_2)
# select the peak with 30Hz width
if snr_avg > 4:
plt.ylabel(sub_title[i] + "\n%.3f" % snr_avg)
print (sub_title[i] + " %.3f" % snr_avg)
else:
plt.ylabel(sub_title[i] + "\n%.3f" % snr_avg, color='r')
plt.xlim(0,1300)
# plt.ylim(top=100)
#plt.legend(('no.1 %.4f', 'no.2%.4f') %(snr_ratio_1, snr_ratio_2), loc='upper right', fontsize='xx-small')
plt.legend(loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_test_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def fig_retest_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i+1, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def distance_mx(sig_in):
# freq_range -> from 0 to ???
freq_range = 13000
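    # 13000 bins at the 0.1 Hz spacing used above correspond to the 0-1300 Hz band shown in
    # the magnitude plots; even rows of sig_in are the first ('Retest') recordings and odd
    # rows the second ('Test') recordings, following the convention of the fig_* functions.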
matrix_temp = np.zeros((22, 22))
matrix_temp_square = np.zeros((22, 22))
for i in range(22):
for j in range(22):
temp = np.asarray(sig_in.iloc[2*i, 0:freq_range] - sig_in.iloc[2*j+1, 0:freq_range])
temp_sum = 0
temp_square_sum = 0
for k in range(freq_range):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp[k])
temp_square_sum = temp_square_sum + (abs(temp[k]))**2
matrix_temp[i][j] = temp_sum
matrix_temp_square[i][j] = temp_square_sum
output_1 = pd.DataFrame(matrix_temp)
output_2 = | pd.DataFrame(matrix_temp_square) | pandas.DataFrame |
import numpy as np
import pandas as pd
import logging
from utils.utils import count_nulls
import etl.processing_raw as processing_raw
import etl.processing_l1 as processing_l1
from io import StringIO
import geopandas as gpd
logger = logging.getLogger(__name__)
def transform_raw(sources: dict, config: dict) -> dict:
src_l1 = {}
logger.info('********** Getting information from dataframes **********')
for df_name, df in sources.items():
# get function name to execute
if df_name in config['common_datasets_transformations']:
process_function_name = 'process_generic_dataset'
elif df_name in config['skip_processing_raw']:
process_function_name = 'skip_processing'
else:
process_function_name = f'process_{df_name}'
# execute function
try:
func = getattr(processing_raw, process_function_name)
except Exception:
logger.warning(f'Function {process_function_name} not implemented')
else:
logger.info(f'Calling {process_function_name} for {df_name}')
config['current_df'] = df_name
df_l1 = func(df, config)
if type(df_l1) == pd.DataFrame:
src_l1[df_name] = df_l1
elif type(df_l1) == dict:
src_l1 = {**src_l1, **df_l1}
return src_l1
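
# The dynamic dispatch in transform_raw expects etl.processing_raw to expose one function per
# dataset (process_<df_name>, plus process_generic_dataset and skip_processing), each taking
# (df, config) and returning either a DataFrame or a dict of DataFrames. The skeleton below is
# purely illustrative of that contract; the real implementations live in etl/processing_raw.py.
#
#     def process_generic_dataset(df: pd.DataFrame, config: dict) -> pd.DataFrame:
#         # e.g. normalise column names and drop fully-empty rows
#         return df.rename(columns=str.lower).dropna(how='all')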
def transform_l1(sources: dict, min_year: int, max_year: int) -> dict:
src_l2 = {}
for df_name, dfs in sources.items():
process_function_name = f'process_{df_name}'
# execute function
try:
func = getattr(processing_l1, process_function_name)
except Exception:
logger.warning(f'Function {process_function_name} not '
f'implemented. Returning df without transforming')
src_l2[df_name] = dfs[df_name]
else:
logger.info(f'Calling {process_function_name} for {df_name}')
if process_function_name == 'process_lugares':
src_l2[df_name] = func(dfs, min_year, max_year)
else:
src_l2[df_name] = func(dfs)
return src_l2
def transform_dataset(sources: dict, min_anyo: int,
max_anyo: int) -> pd.DataFrame:
dfs = sources['dataset']
# nom_barrio will be taken from superficie
dfs['ocupacion_media_piso'].drop(columns=['nom_barrio'], inplace=True)
dfs['natalidad'].drop(columns=['nom_barrio'], inplace=True)
dfs['lugares'].drop(columns=['nom_barrio'], inplace=True)
dfs['inmigracion'].drop(columns=['nom_barrio'], inplace=True)
merge_on = ['anyo', 'id_barrio']
kpis = pd.merge(dfs['superficie'], dfs['incidentes'], on=['id_barrio'],
how='left')
kpis = pd.merge(kpis, dfs['inmigracion'], on=merge_on, how='left')
kpis = pd.merge(kpis, dfs['natalidad'], on=merge_on, how='left')
kpis = pd.merge(kpis, dfs['ocupacion_media_piso'], on=merge_on, how='left')
kpis = pd.merge(kpis, dfs['precio_alquiler'], on=merge_on, how='left')
kpis = pd.merge(kpis, dfs['precio_compra_venta'], on=merge_on, how='left')
kpis = pd.merge(kpis, dfs['renta'], on=merge_on, how='left')
kpis = kpis[kpis['id_barrio'] != 99]
lugares = dfs['lugares']
group = ['id_barrio', 'anyo', 'categoria_lugar']
lugares = lugares.rename(columns={'num_locales': 'num_ubicaciones_'})
lugares = lugares.groupby(group).size().reset_index(name='num_ubic_')
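    # pivot so that each categoria_lugar becomes its own count column, then flatten the
    # resulting MultiIndex column names into single strings (e.g. 'num_ubic_<categoria>')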
lugares = lugares.set_index(group).unstack().reset_index()
col_names = [c[0] + c[1] for c in lugares.columns]
lugares.columns = col_names
num_ubics_cols = [col for col in col_names if 'num_ubic' in col]
lugares[num_ubics_cols] = lugares[num_ubics_cols].fillna(0)
dataset = | pd.merge(kpis, lugares, on=['anyo', 'id_barrio'], how='left') | pandas.merge |
import pandas as pd
import os
import importlib
reader = importlib.import_module("read_csv")
index_filename = "Index.csv"
def write_to_index(filename: str,
mean_street_quality: float,
distance: float,
speed: float,
relevant: bool):
new_data = {"Filename": filename,
"Mean_Street_Quality": mean_street_quality,
"Distance": distance,
"Speed": speed,
"Relevant": relevant}
original_data = {"Filename": [filename],
"Mean_Street_Quality": [mean_street_quality],
"Distance": [distance],
"Speed": [speed],
"Relevant": [relevant]}
if os.path.exists(index_filename):
df = pd.read_csv(index_filename, index_col=0)
df = pd.concat([df, pd.DataFrame.from_dict(original_data)], ignore_index=True, sort=False)
else:
df = | pd.DataFrame(original_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Test some of the basic _core functions
import datetime as dt
from importlib import reload
import logging
import numpy as np
import pandas as pds
import pytest
import xarray as xr
import pysat
import pysat.instruments.pysat_testing
import pysat.instruments.pysat_testing_xarray
import pysat.instruments.pysat_testing2d
import pysat.instruments.pysat_testing2d_xarray
from pysat.utils import generate_instrument_list
from pysat.utils.time import filter_datetime_input
xarray_epoch_name = 'time'
testing_kwargs = {'test_init_kwarg': True, 'test_clean_kwarg': False,
'test_preprocess_kwarg': 'test_phrase',
'test_load_kwarg': 'bright_light',
'test_list_files_kwarg': 'sleep_tight',
'test_list_remote_kwarg': 'one_eye_open',
'test_download_kwarg': 'exit_night'}
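# Each entry above maps to a keyword accepted by one of the pysat_testing instrument functions
# (init, clean, preprocess, load, list_files, list_remote_files, download); the keyword-handling
# tests below verify that these values are passed through to the corresponding function.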
# -----------------------------------------------------------------------------
#
# Test Instrument object basics
#
# -----------------------------------------------------------------------------
class TestBasics():
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        global testing_kwargs
        reload(pysat.instruments.pysat_testing)
self.testInst = pysat.Instrument(platform='pysat', name='testing',
num_samples=10,
clean_level='clean',
update_files=True,
**testing_kwargs)
self.ref_time = pysat.instruments.pysat_testing._test_dates['']['']
self.ref_doy = int(self.ref_time.strftime('%j'))
self.out = None
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst, self.out, self.ref_time, self.ref_doy
def support_iter_evaluations(self, values, for_loop=False, reverse=False):
"""Supports testing of .next/.prev via dates/files"""
# First, treat with no processing to provide testing as inputs
# supplied
if len(values) == 4:
# testing by date
starts = values[0]
stops = values[1]
step = values[2]
width = values[3]
self.testInst.bounds = (starts, stops, step, width)
elif len(values) == 6:
# testing by file
start_files = values[0]
starts = values[1]
stop_files = values[2]
stops = values[3]
step = values[4]
width = values[5]
self.testInst.bounds = (start_files, stop_files, step, width)
# create list of dates for consistency of later code
starts = np.asarray([starts])
stops = np.asarray([stops])
if len(starts.shape) > 1:
starts = starts.squeeze().tolist()
stops = stops.squeeze().tolist()
else:
starts = starts.tolist()
stops = stops.tolist()
# iterate until we run out of bounds
dates = []
time_range = []
if for_loop:
# iterate via for loop option
for inst in self.testInst:
dates.append(inst.date)
time_range.append((inst.index[0],
inst.index[-1]))
else:
# .next/.prev iterations
if reverse:
iterator = self.testInst.prev
else:
iterator = self.testInst.next
try:
while True:
iterator()
dates.append(self.testInst.date)
time_range.append((self.testInst.index[0],
self.testInst.index[-1]))
except StopIteration:
# reached the end
pass
# Deal with file or date iteration, make file inputs same as date for
# verification purposes.
if isinstance(step, int):
step = str(step) + 'D'
if isinstance(width, int):
width = dt.timedelta(days=width)
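        # Build the list of expected load dates: for each (start, stop) bound pair, iteration
        # should cover start through (stop - width + 1 day) in increments of step.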
out = []
for start, stop in zip(starts, stops):
tdate = stop - width + dt.timedelta(days=1)
out.extend(pds.date_range(start, tdate, freq=step).tolist())
if reverse:
out = out[::-1]
assert np.all(dates == out)
output = {}
output['expected_times'] = out
output['observed_times'] = time_range
output['starts'] = starts
output['stops'] = stops
output['width'] = width
output['step'] = step
return output
# -------------------------------------------------------------------------
#
# Test basic loads, by date, filename, file id, as well as prev/next
#
# -------------------------------------------------------------------------
def eval_successful_load(self, end_date=None):
"""Support routine for evaluating successful loading of self.testInst
Parameters
----------
end_date : dt.datetime or NoneType
            End date for loading data. If None, assumes self.ref_time + 1 day.
(default=None)
"""
# Test that the first loaded time matches the first requested time
assert self.testInst.index[0] == self.ref_time, \
"First loaded time is incorrect"
# Test that the Instrument date is set to the requested start date
self.out = dt.datetime(self.ref_time.year, self.ref_time.month,
self.ref_time.day)
assert self.testInst.date == self.out, \
"Incorrect Instrument date attribute"
# Test that the end of the loaded data matches the requested end date
if end_date is None:
end_date = self.ref_time + dt.timedelta(days=1)
assert self.testInst.index[-1] > self.ref_time, \
"Last loaded time is not greater than the start time"
assert self.testInst.index[-1] <= end_date, \
"Last loaded time is greater than the requested end date"
return
def test_basic_instrument_load(self):
"""Test that the correct day is loaded, specifying only start year, doy
"""
# Load data by year and day of year
self.testInst.load(self.ref_time.year, self.ref_doy)
# Test that the loaded date range is correct
self.eval_successful_load()
return
def test_basic_instrument_load_w_kwargs(self):
"""Test that the correct day is loaded with optional kwarg
"""
# Load data by year and day of year
self.testInst.load(self.ref_time.year, self.ref_doy, num_samples=30)
# Test that the loaded date range is correct
self.eval_successful_load()
return
def test_basic_instrument_load_two_days(self):
"""Test that the correct day is loaded (checking object date and data).
"""
# Load the reference date
end_date = self.ref_time + dt.timedelta(days=2)
end_doy = int(end_date.strftime("%j"))
self.testInst.load(self.ref_time.year, self.ref_doy, end_date.year,
end_doy)
# Test that the loaded date range is correct
self.eval_successful_load(end_date=end_date)
return
def test_basic_instrument_bad_keyword_init(self):
"""Checks for error when instantiating with bad load keywords on init.
"""
# Test that the correct error is raised
with pytest.raises(ValueError) as verr:
pysat.Instrument(platform=self.testInst.platform,
name=self.testInst.name, num_samples=10,
clean_level='clean',
unsupported_keyword_yeah=True)
# Evaluate error message
assert str(verr).find("unknown keyword supplied") > 0
return
def test_basic_instrument_bad_keyword_at_load(self):
"""Checks for error when calling load with bad keywords.
"""
# Test that the correct error is raised
with pytest.raises(TypeError) as terr:
self.testInst.load(date=self.ref_time, unsupported_keyword=True)
# Evaluate error message
assert str(terr).find("load() got an unexpected keyword") >= 0
return
@pytest.mark.parametrize('kwarg', ['supported_tags', 'start', 'stop',
'freq', 'date_array', 'data_path'])
def test_basic_instrument_reserved_keyword(self, kwarg):
"""Check for error when instantiating with reserved keywords."""
# Check that the correct error is raised
with pytest.raises(ValueError) as err:
pysat.Instrument(platform=self.testInst.platform,
name=self.testInst.name, num_samples=10,
clean_level='clean',
**{kwarg: '1s'})
# Check that the error message is correct
estr = ''.join(('Reserved keyword "', kwarg, '" is not ',
'allowed at instantiation.'))
assert str(err).find(estr) >= 0
return
def test_basic_instrument_load_yr_no_doy(self):
"""Ensure doy required if yr present."""
# Check that the correct error is raised
with pytest.raises(TypeError) as err:
self.testInst.load(self.ref_time.year)
# Check that the error message is correct
estr = 'Unknown or incomplete input combination.'
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize('doy', [0, 367, 1000, -1, -10000])
def test_basic_instrument_load_yr_bad_doy(self, doy):
"""Ensure doy load keyword in valid range"""
with pytest.raises(ValueError) as err:
self.testInst.load(self.ref_time.year, doy)
estr = 'Day of year (doy) is only valid between and '
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize('end_doy', [0, 367, 1000, -1, -10000])
def test_basic_instrument_load_yr_bad_end_doy(self, end_doy):
"""Ensure end_doy keyword in valid range"""
with pytest.raises(ValueError) as err:
self.testInst.load(self.ref_time.year, 1, end_yr=self.ref_time.year,
end_doy=end_doy)
estr = 'Day of year (end_doy) is only valid between and '
assert str(err).find(estr) >= 0
return
def test_basic_instrument_load_yr_no_end_doy(self):
"""Ensure end_doy required if end_yr present"""
with pytest.raises(ValueError) as err:
self.testInst.load(self.ref_time.year, self.ref_doy,
self.ref_time.year)
estr = 'Both end_yr and end_doy must be set'
assert str(err).find(estr) >= 0
return
@pytest.mark.parametrize("input", [{'yr': 2009, 'doy': 1,
'date': dt.datetime(2009, 1, 1)},
{'yr': 2009, 'doy': 1,
'end_date': dt.datetime(2009, 1, 1)},
{'yr': 2009, 'doy': 1,
'fname': 'dummy_str.nofile'},
{'yr': 2009, 'doy': 1,
'stop_fname': 'dummy_str.nofile'},
{'date': dt.datetime(2009, 1, 1),
'fname': 'dummy_str.nofile'},
{'date': dt.datetime(2009, 1, 1),
'stop_fname': 'dummy_str.nofile'},
{'date': dt.datetime(2009, 1, 1),
'fname': 'dummy_str.nofile',
'end_yr': 2009, 'end_doy': 1}])
def test_basic_instrument_load_mixed_inputs(self, input):
"""Ensure mixed load inputs raise ValueError"""
with pytest.raises(ValueError) as err:
self.testInst.load(**input)
estr = 'An inconsistent set of inputs have been'
assert str(err).find(estr) >= 0
return
def test_basic_instrument_load_no_input(self):
"""Test .load() loads all data"""
self.testInst.load()
assert (self.testInst.index[0] == self.testInst.files.start_date)
assert (self.testInst.index[-1] >= self.testInst.files.stop_date)
assert (self.testInst.index[-1] <= self.testInst.files.stop_date
+ dt.timedelta(days=1))
return
@pytest.mark.parametrize('load_in,verr',
[('fname', 'have multi_file_day and load by file'),
(None, 'is not supported with multi_file_day')])
def test_basic_instrument_load_by_file_and_multifile(self, load_in, verr):
"""Ensure some load calls raises ValueError with multi_file_day as True
"""
self.testInst.multi_file_day = True
if load_in == 'fname':
load_kwargs = {load_in: self.testInst.files[0]}
else:
load_kwargs = dict()
with pytest.raises(ValueError) as err:
self.testInst.load(**load_kwargs)
assert str(err).find(verr) >= 0
return
def test_basic_instrument_load_by_date(self):
"""Test loading by date"""
self.testInst.load(date=self.ref_time)
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_basic_instrument_load_by_dates(self):
"""Test date range loading, date and end_date"""
end_date = self.ref_time + dt.timedelta(days=2)
self.testInst.load(date=self.ref_time, end_date=end_date)
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
self.out = self.testInst.index[-1]
assert (self.out >= self.ref_time + dt.timedelta(days=1))
assert (self.out <= self.ref_time + dt.timedelta(days=2))
def test_basic_instrument_load_by_date_with_extra_time(self):
"""Ensure .load(date=date) only uses year, month, day portion of date"""
# put in a date that has more than year, month, day
self.testInst.load(date=dt.datetime(2009, 1, 1, 1, 1, 1))
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_basic_instrument_load_data(self):
"""Test if the correct day is being loaded (checking down to the sec).
"""
self.testInst.load(self.ref_time.year, self.ref_doy)
assert (self.testInst.index[0] == self.ref_time)
def test_basic_instrument_load_leap_year(self):
"""Test if the correct day is being loaded (Leap-Year)."""
self.ref_time = dt.datetime(2008, 12, 31)
self.ref_doy = 366
self.testInst.load(self.ref_time.year, self.ref_doy)
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_next_load_default(self):
"""Test if first day is loaded by default when first invoking .next.
"""
self.ref_time = dt.datetime(2008, 1, 1)
self.testInst.next()
self.out = self.testInst.index[0]
assert self.out == self.ref_time
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_prev_load_default(self):
"""Test if last day is loaded by default when first invoking .prev.
"""
self.ref_time = dt.datetime(2010, 12, 31)
self.testInst.prev()
self.out = self.testInst.index[0]
assert self.out == self.ref_time
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_next_load_bad_start_file(self):
"""Test Error if trying to iterate when on a file not in iteration list
"""
self.testInst.load(fname=self.testInst.files[1])
# set new bounds that doesn't include this date
self.testInst.bounds = (self.testInst.files[0], self.testInst.files[20],
2, 1)
with pytest.raises(StopIteration) as err:
self.testInst.next()
estr = 'Unable to find loaded filename '
assert str(err).find(estr) >= 0
return
def test_prev_load_bad_start_file(self):
"""Test Error if trying to iterate when on a file not in iteration list
"""
self.testInst.load(fname=self.testInst.files[12])
# set new bounds that doesn't include this date
self.testInst.bounds = (self.testInst.files[9], self.testInst.files[20],
2, 1)
with pytest.raises(StopIteration) as err:
self.testInst.prev()
estr = 'Unable to find loaded filename '
assert str(err).find(estr) >= 0
return
def test_next_load_bad_start_date(self):
"""Test Error if trying to iterate when on a date not in iteration list
"""
self.testInst.load(date=self.ref_time)
# set new bounds that doesn't include this date
self.testInst.bounds = (self.ref_time + dt.timedelta(days=1),
self.ref_time + dt.timedelta(days=10),
'2D', dt.timedelta(days=1))
with pytest.raises(StopIteration) as err:
self.testInst.next()
estr = 'Unable to find loaded date '
assert str(err).find(estr) >= 0
return
def test_prev_load_bad_start_date(self):
"""Test Error if trying to iterate when on a date not in iteration list
"""
self.ref_time = dt.datetime(2008, 1, 2)
self.testInst.load(date=self.ref_time)
# set new bounds that doesn't include this date
self.testInst.bounds = (self.ref_time + dt.timedelta(days=1),
self.ref_time + dt.timedelta(days=10),
'2D', dt.timedelta(days=1))
with pytest.raises(StopIteration) as err:
self.testInst.prev()
estr = 'Unable to find loaded date '
assert str(err).find(estr) >= 0
return
def test_next_load_empty_iteration(self):
"""Ensure empty iteration list handled ok via .next"""
self.testInst.bounds = (None, None, '10000D',
dt.timedelta(days=10000))
with pytest.raises(StopIteration) as err:
self.testInst.next()
estr = 'File list is empty. '
assert str(err).find(estr) >= 0
return
def test_prev_load_empty_iteration(self):
"""Ensure empty iteration list handled ok via .prev"""
self.testInst.bounds = (None, None, '10000D',
dt.timedelta(days=10000))
with pytest.raises(StopIteration) as err:
self.testInst.prev()
estr = 'File list is empty. '
assert str(err).find(estr) >= 0
return
def test_next_fname_load_default(self):
"""Test next day is being loaded (checking object date)."""
self.ref_time = dt.datetime(2008, 1, 2)
self.testInst.load(fname=self.testInst.files[0])
self.testInst.next()
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_prev_fname_load_default(self):
"""Test prev day is loaded when invoking .prev."""
self.ref_time = dt.datetime(2008, 1, 3)
self.testInst.load(fname=self.testInst.files[3])
self.testInst.prev()
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_basic_fname_instrument_load(self):
"""Test loading by filename from attached .files.
"""
self.ref_time = dt.datetime(2008, 1, 1)
self.testInst.load(fname=self.testInst.files[0])
self.out = self.testInst.index[0]
assert (self.out == self.ref_time)
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_filename_load(self):
"""Test if file is loadable by filename, relative to
top_data_dir/platform/name/tag"""
self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'))
assert self.testInst.index[0] == self.ref_time
def test_filenames_load(self):
"""Test if files are loadable by filenames, relative to
top_data_dir/platform/name/tag"""
stop_fname = self.ref_time + dt.timedelta(days=1)
stop_fname = stop_fname.strftime('%Y-%m-%d.nofile')
self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'),
stop_fname=stop_fname)
assert self.testInst.index[0] == self.ref_time
assert self.testInst.index[-1] >= self.ref_time + dt.timedelta(days=1)
assert self.testInst.index[-1] <= self.ref_time + dt.timedelta(days=2)
def test_filenames_load_out_of_order(self):
"""Test error raised if fnames out of temporal order"""
stop_fname = self.ref_time + dt.timedelta(days=1)
stop_fname = stop_fname.strftime('%Y-%m-%d.nofile')
with pytest.raises(ValueError) as err:
check_fname = self.ref_time.strftime('%Y-%m-%d.nofile')
self.testInst.load(fname=stop_fname,
stop_fname=check_fname)
estr = '`stop_fname` must occur at a later date '
assert str(err).find(estr) >= 0
def test_next_filename_load_default(self):
"""Test next day is being loaded (checking object date)."""
self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'))
self.testInst.next()
self.out = self.testInst.index[0]
assert (self.out == self.ref_time + dt.timedelta(days=1))
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_prev_filename_load_default(self):
"""Test prev day is loaded when invoking .prev."""
self.testInst.load(fname=self.ref_time.strftime('%Y-%m-%d.nofile'))
self.testInst.prev()
self.out = self.testInst.index[0]
assert (self.out == self.ref_time - dt.timedelta(days=1))
self.out = dt.datetime(self.out.year, self.out.month, self.out.day)
assert (self.out == self.testInst.date)
def test_list_files(self):
files = self.testInst.files.files
assert isinstance(files, pds.Series)
def test_remote_file_list(self):
"""Test remote_file_list for valid list of files"""
stop = self.ref_time + dt.timedelta(days=30)
self.out = self.testInst.remote_file_list(start=self.ref_time,
stop=stop)
assert filter_datetime_input(self.out.index[0]) == self.ref_time
assert filter_datetime_input(self.out.index[-1]) == stop
def test_remote_date_range(self):
"""Test remote_date_range for valid pair of dates"""
stop = self.ref_time + dt.timedelta(days=30)
self.out = self.testInst.remote_date_range(start=self.ref_time,
stop=stop)
assert len(self.out) == 2
assert filter_datetime_input(self.out[0]) == self.ref_time
assert filter_datetime_input(self.out[-1]) == stop
@pytest.mark.parametrize("file_bounds, non_default",
[(False, False), (True, False), (False, True),
(True, True)])
def test_download_updated_files(self, caplog, file_bounds, non_default):
"""Test download_updated_files and default bounds are updated"""
if file_bounds:
if non_default:
# set bounds to second and second to last file
self.testInst.bounds = (self.testInst.files[1],
self.testInst.files[-2])
else:
# set bounds to first and last file
self.testInst.bounds = (self.testInst.files[0],
self.testInst.files[-1])
else:
if non_default:
# set bounds to first and first date
self.testInst.bounds = (self.testInst.files.start_date,
self.testInst.files.start_date)
with caplog.at_level(logging.INFO, logger='pysat'):
self.testInst.download_updated_files()
# Test the logging output for the following conditions:
# - perform a local search,
# - new files are found,
# - download new files, and
# - update local file list.
assert "local files" in caplog.text
assert "that are new or updated" in caplog.text
assert "Downloading data to" in caplog.text
assert "Updating pysat file list" in caplog.text
if non_default:
assert "Updating instrument object bounds " not in caplog.text
else:
text = caplog.text
if file_bounds:
assert "Updating instrument object bounds by file" in text
else:
assert "Updating instrument object bounds by date" in text
def test_download_recent_data(self, caplog):
with caplog.at_level(logging.INFO, logger='pysat'):
self.testInst.download()
# Ensure user was told that recent data will be downloaded
assert "most recent data by default" in caplog.text
# Ensure user was notified of new files being download
assert "Downloading data to" in caplog.text
# Ensure user was notified of updates to the local file list
assert "Updating pysat file list" in caplog.text
def test_download_bad_date_range(self, caplog):
"""Test download with bad date input."""
with caplog.at_level(logging.WARNING, logger='pysat'):
self.testInst.download(start=self.ref_time,
stop=self.ref_time - dt.timedelta(days=10))
# Ensure user is warned about not calling download due to bad time input
assert "Requested download over an empty date range" in caplog.text
return
# -------------------------------------------------------------------------
#
# Test date helpers
#
# -------------------------------------------------------------------------
def test_today_yesterday_and_tomorrow(self):
""" Test the correct instantiation of yesterday/today/tomorrow dates
"""
self.ref_time = dt.datetime.utcnow()
self.out = dt.datetime(self.ref_time.year, self.ref_time.month,
self.ref_time.day)
assert self.out == self.testInst.today()
assert self.out - dt.timedelta(days=1) == self.testInst.yesterday()
assert self.out + dt.timedelta(days=1) == self.testInst.tomorrow()
@pytest.mark.parametrize("in_time, islist",
[(dt.datetime.utcnow(), False),
(dt.datetime(2010, 1, 1, 12, tzinfo=dt.timezone(
dt.timedelta(seconds=14400))), False),
([dt.datetime(2010, 1, 1, 12, i,
tzinfo=dt.timezone(
dt.timedelta(seconds=14400)))
for i in range(3)], True)])
def test_filter_datetime(self, in_time, islist):
""" Test the range of allowed inputs for the Instrument datetime filter
"""
# Because the input datetime is the middle of the day and the offset
# is four hours, the reference date and input date are the same
if islist:
self.ref_time = [dt.datetime(tt.year, tt.month, tt.day)
for tt in in_time]
self.out = filter_datetime_input(in_time)
else:
self.ref_time = [dt.datetime(in_time.year, in_time.month,
in_time.day)]
self.out = [filter_datetime_input(in_time)]
# Test for the date values and timezone awareness status
for i, tt in enumerate(self.out):
assert self.out[i] == self.ref_time[i]
assert self.out[i].tzinfo is None or self.out[i].utcoffset() is None
def test_filtered_date_attribute(self):
""" Test use of filter during date assignment
"""
self.ref_time = dt.datetime.utcnow()
self.out = dt.datetime(self.ref_time.year, self.ref_time.month,
self.ref_time.day)
self.testInst.date = self.ref_time
assert self.out == self.testInst.date
# -------------------------------------------------------------------------
#
# Test __eq__ method
#
# -------------------------------------------------------------------------
def test_eq_no_data(self):
"""Test equality when the same object"""
inst_copy = self.testInst.copy()
assert inst_copy == self.testInst
return
def test_eq_both_with_data(self):
"""Test equality when the same object with loaded data"""
self.testInst.load(date=self.ref_time)
inst_copy = self.testInst.copy()
assert inst_copy == self.testInst
return
def test_eq_one_with_data(self):
"""Test equality when the same objects but only one with loaded data"""
self.testInst.load(date=self.ref_time)
inst_copy = self.testInst.copy()
inst_copy.data = self.testInst._null_data
assert not (inst_copy == self.testInst)
return
def test_eq_different_data_type(self):
"""Test equality different data type"""
self.testInst.load(date=self.ref_time)
inst_copy = self.testInst.copy()
if self.testInst.pandas_format:
inst_copy.pandas_format = False
inst_copy.data = xr.Dataset()
else:
inst_copy.pandas_format = True
inst_copy.data = pds.DataFrame()
assert not (inst_copy == self.testInst)
return
def test_eq_different_object(self):
"""Test equality using different pysat.Instrument objects"""
reload(pysat.instruments.pysat_testing)
obj1 = pysat.Instrument(platform='pysat', name='testing',
num_samples=10, clean_level='clean',
update_files=True)
reload(pysat.instruments.pysat_testing_xarray)
obj2 = pysat.Instrument(platform='pysat', name='testing_xarray',
num_samples=10, clean_level='clean',
update_files=True)
assert not (obj1 == obj2)
return
def test_eq_different_type(self):
"""Test equality False when non-Instrument object"""
assert self.testInst != np.array([])
return
def test_inequality_modified_object(self):
"""Test that equality is false if other missing attributes"""
self.out = self.testInst.copy()
# Remove attribute
del self.out.platform
assert self.testInst != self.out
return
def test_inequality_reduced_object(self):
"""Test that equality is false if self missing attributes"""
self.out = self.testInst.copy()
self.out.hi_there = 'hi'
assert self.testInst != self.out
return
# -------------------------------------------------------------------------
#
# Test copy method
#
# -------------------------------------------------------------------------
def test_copy(self):
"""Test .copy()"""
inst_copy = self.testInst.copy()
assert inst_copy == self.testInst
return
def test_copy_from_reference(self):
"""Test .copy() if a user invokes from a weakref.proxy of Instrument"""
inst_copy = self.testInst.orbits.inst.copy()
inst_copy2 = self.testInst.files.inst_info['inst'].copy()
assert inst_copy == self.testInst
assert inst_copy == inst_copy2
assert inst_copy2 == self.testInst
return
def test_copy_w_inst_module(self):
"""Test .copy() with inst_module != None"""
# Assign module to inst_module
self.testInst.inst_module = pysat.instruments.pysat_testing
inst_copy = self.testInst.copy()
# Confirm equality and that module is still present
assert inst_copy == self.testInst
assert inst_copy.inst_module == pysat.instruments.pysat_testing
assert self.testInst.inst_module == pysat.instruments.pysat_testing
return
# -------------------------------------------------------------------------
#
# Test concat_data method
#
# -------------------------------------------------------------------------
@pytest.mark.parametrize("prepend, sort_dim_toggle",
[(True, True), (True, False), (False, False)])
def test_concat_data(self, prepend, sort_dim_toggle):
""" Test Instrument data concatonation
"""
# Load a data set to concatonate
self.testInst.load(self.ref_time.year, self.ref_doy + 1)
data2 = self.testInst.data
len2 = len(self.testInst.index)
# Load a different data set into the instrument
self.testInst.load(self.ref_time.year, self.ref_doy)
len1 = len(self.testInst.index)
# Set the keyword arguments
kwargs = {'prepend': prepend}
if sort_dim_toggle:
if self.testInst.pandas_format:
kwargs['sort'] = True
else:
kwargs['dim'] = 'Epoch2'
data2 = data2.rename({xarray_epoch_name: 'Epoch2'})
self.testInst.data = self.testInst.data.rename(
{xarray_epoch_name: 'Epoch2'})
# Concat together
self.testInst.concat_data(data2, **kwargs)
if sort_dim_toggle and not self.testInst.pandas_format:
# Rename to the standard epoch name
self.testInst.data = self.testInst.data.rename(
{'Epoch2': xarray_epoch_name})
# Basic test for concatenation
self.out = len(self.testInst.index)
assert (self.out == len1 + len2)
        # Detailed test for concatenation through index
if prepend:
assert np.all(self.testInst.index[:len1]
> self.testInst.index[len1:])
else:
assert np.all(self.testInst.index[:len1]
< self.testInst.index[len1:])
if self.testInst.pandas_format:
if sort_dim_toggle:
assert np.all(self.testInst.data.columns
== np.sort(data2.columns))
else:
assert np.all(self.testInst.data.columns == data2.columns)
# -------------------------------------------------------------------------
#
# Test empty property flags, if True, no data
#
# -------------------------------------------------------------------------
def test_empty_flag_data_empty(self):
""" Test the status of the empty flag for unloaded data."""
assert self.testInst.empty
return
def test_empty_flag_data_not_empty(self):
""" Test the status of the empty flag for loaded data."""
self.testInst.load(date=self.ref_time)
assert not self.testInst.empty
# -------------------------------------------------------------------------
#
# Test index attribute, should always be a datetime index
#
# -------------------------------------------------------------------------
def test_index_attribute(self):
""" Test the index attribute before and after loading data."""
# empty Instrument test
assert isinstance(self.testInst.index, pds.Index)
# now repeat the same test but with data loaded
self.testInst.load(date=self.ref_time)
assert isinstance(self.testInst.index, pds.Index)
def test_index_return(self):
# load data
self.testInst.load(self.ref_time.year, self.ref_doy)
# ensure we get the index back
if self.testInst.pandas_format:
assert np.all(self.testInst.index == self.testInst.data.index)
else:
assert np.all(self.testInst.index
== self.testInst.data.indexes[xarray_epoch_name])
# #------------------------------------------------------------------------
# #
# # Test custom attributes
# #
# #------------------------------------------------------------------------
def test_retrieve_bad_attribute(self):
with pytest.raises(AttributeError):
self.testInst.bad_attr
def test_base_attr(self):
self.testInst._base_attr
assert '_base_attr' in dir(self.testInst)
def test_inst_attributes_not_overridden(self):
"""Test that custom Instrument attributes are not overwritten upon load
"""
greeting = '... listen!'
self.testInst.hei = greeting
self.testInst.load(date=self.ref_time)
assert self.testInst.hei == greeting
# -------------------------------------------------------------------------
#
# test textual representations
#
# -------------------------------------------------------------------------
def test_basic_repr(self):
"""The repr output will match the beginning of the str output"""
self.out = self.testInst.__repr__()
assert isinstance(self.out, str)
assert self.out.find("pysat.Instrument(") == 0
def test_basic_str(self):
"""Check for lines from each decision point in repr"""
self.out = self.testInst.__str__()
assert isinstance(self.out, str)
assert self.out.find('pysat Instrument object') == 0
# No custom functions
assert self.out.find('Custom Functions: 0') > 0
# No orbital info
        assert self.out.find('Orbit Settings') < 0
# Files exist for test inst
assert self.out.find('Date Range:') > 0
# No loaded data
assert self.out.find('No loaded data') > 0
assert self.out.find('Number of variables') < 0
assert self.out.find('uts') < 0
def test_str_w_orbit(self):
"""Test string output with Orbit data """
reload(pysat.instruments.pysat_testing)
orbit_info = {'index': 'mlt',
'kind': 'local time',
'period': np.timedelta64(97, 'm')}
testInst = pysat.Instrument(platform='pysat', name='testing',
num_samples=10,
clean_level='clean',
update_files=True,
orbit_info=orbit_info)
self.out = testInst.__str__()
# Check that orbit info is passed through
assert self.out.find('Orbit Settings') > 0
assert self.out.find(orbit_info['kind']) > 0
assert self.out.find('Loaded Orbit Number: 0') > 0
# Activate orbits, check that message has changed
testInst.load(self.ref_time.year, self.ref_doy)
testInst.orbits.next()
self.out = testInst.__str__()
assert self.out.find('Loaded Orbit Number: 1') > 0
def test_str_w_padding(self):
"""Test string output with data padding """
self.testInst.pad = dt.timedelta(minutes=5)
self.out = self.testInst.__str__()
assert self.out.find('Data Padding: 0:05:00') > 0
def test_str_w_custom_func(self):
"""Test string output with custom function """
def testfunc(self):
pass
self.testInst.custom_attach(testfunc)
self.out = self.testInst.__str__()
assert self.out.find('testfunc') > 0
def test_str_w_load_lots_data(self):
"""Test string output with loaded data """
self.testInst.load(self.ref_time.year, self.ref_doy)
self.out = self.testInst.__str__()
assert self.out.find('Number of variables:') > 0
assert self.out.find('...') > 0
def test_str_w_load_less_data(self):
"""Test string output with loaded data """
# Load the test data
self.testInst.load(self.ref_time.year, self.ref_doy)
# Ensure the desired data variable is present and delete all others
# 4-6 variables are needed to test all lines; choose the lesser limit
nvar = 4
self.testInst.data = self.testInst.data[self.testInst.variables[:nvar]]
# Test output with one data variable
self.out = self.testInst.__str__()
assert self.out.find('Number of variables: 4') > 0
assert self.out.find('Variable Names') > 0
for n in range(nvar):
assert self.out.find(self.testInst.variables[n]) > 0
# -------------------------------------------------------------------------
#
# test instrument initialization functions
#
# -------------------------------------------------------------------------
def test_instrument_init(self):
"""Test if init function supplied by instrument can modify object"""
assert self.testInst.new_thing
def test_custom_instrument_load(self):
"""
Test if the correct day is being loaded (End-to-End),
with no instrument file but routines are passed.
"""
import pysat.instruments.pysat_testing as test
self.out = pysat.Instrument(inst_module=test, tag='',
clean_level='clean')
self.ref_time = dt.datetime(2009, 2, 1)
self.ref_doy = 32
self.out.load(self.ref_time.year, self.ref_doy)
assert self.out.date == self.ref_time
def test_custom_instrument_load_2(self):
"""
Test if an exception is thrown correctly if there is no
instrument file and supplied routines are incomplete.
"""
import pysat.instruments.pysat_testing as test
del test.list_files
with pytest.raises(AttributeError):
pysat.Instrument(inst_module=test, tag='',
clean_level='clean')
def test_custom_instrument_load_3(self):
"""
Test if an exception is thrown correctly if there is no
instrument file and supplied routines are incomplete.
"""
import pysat.instruments.pysat_testing as test
del test.load
with pytest.raises(AttributeError):
pysat.Instrument(inst_module=test, tag='',
clean_level='clean')
# -------------------------------------------------------------------------
#
# test instrument initialization keyword mapping to instrument functions
#
# -------------------------------------------------------------------------
@pytest.mark.parametrize("func, kwarg, val", [('init', 'test_init_kwarg',
True),
('clean', 'test_clean_kwarg',
False),
('preprocess',
'test_preprocess_kwarg',
'test_phrase'),
('load', 'test_load_kwarg',
'bright_light'),
('list_files',
'test_list_files_kwarg',
'sleep_tight'),
('list_remote_files',
'test_list_remote_kwarg',
'one_eye_open'),
('download',
'test_download_kwarg',
'exit_night')
])
def test_instrument_function_keywords(self, func, kwarg, val, caplog):
"""Test if Instrument function keywords are registered by pysat"""
with caplog.at_level(logging.INFO, logger='pysat'):
# Trigger load functions
self.testInst.load(date=self.ref_time)
# Refresh files to trigger other functions
self.testInst.files.refresh()
# Get remote file list
self.testInst.download_updated_files()
# Confirm kwargs made it where they should be
assert kwarg in self.testInst.kwargs[func]
assert self.testInst.kwargs[func][kwarg] == val
# Check if function under test can assign attributes, not all can
live_check = hasattr(self.testInst, kwarg)
if live_check:
# Confirm attribute value
assert getattr(self.testInst, kwarg) == val
else:
# Confirm value echoed to log for functions that can't assign
# attributes. Get log text.
captured = caplog.text
# Test for expected string
test_str = ''.join((kwarg, ' = ', str(val)))
assert captured.find(test_str) >= 0
return
@pytest.mark.parametrize("func, kwarg", [('clean', 'test_clean_kwarg'),
('preprocess',
'test_preprocess_kwarg'),
('load',
'test_load_kwarg'),
('list_files',
'test_list_files_kwarg'),
('list_remote_files',
'test_list_remote_kwarg'),
('download',
'test_download_kwarg')
])
def test_instrument_function_keyword_liveness(self, func, kwarg, caplog):
"""Test if changed keywords are propagated by pysat to functions"""
# Assign a new value to a keyword argument
val = 'live_value'
self.testInst.kwargs[func][kwarg] = val
with caplog.at_level(logging.INFO, logger='pysat'):
# Trigger load functions
self.testInst.load(date=self.ref_time)
# Refresh files to trigger other functions
self.testInst.files.refresh()
# Get remote file list
self.testInst.download_updated_files()
# The passed parameter should be set on Instrument, if a full function
live_check = hasattr(self.testInst, kwarg)
# Not all functions are passed the instrument object
if live_check:
# Confirm attribute value
assert getattr(self.testInst, kwarg) == val
else:
# Confirm value echoed to log for functions that can't assign
# attributes.
captured = caplog.text
# Confirm presence of test string in log
test_str = ''.join((kwarg, ' = ', str(val)))
assert captured.find(test_str) >= 0
return
def test_error_undefined_input_keywords(self):
"""Test for error if undefined keywords provided at instantiation"""
# Add a new keyword
self.testInst.kwargs['load']['undefined_keyword1'] = True
self.testInst.kwargs['load']['undefined_keyword2'] = False
with pytest.raises(ValueError) as err:
# Instantiate instrument with new undefined keyword involved
eval(self.testInst.__repr__())
estr = "".join(("unknown keywords supplied: ['undefined_keyword1',",
" 'undefined_keyword2']"))
assert str(err).find(estr) >= 0
def test_supported_input_keywords(self):
"""Test that supported keywords exist"""
funcs = ['load', 'init', 'list_remote_files', 'list_files', 'download',
'preprocess', 'clean']
# Test instruments all have a supported keyword. Ensure keyword
# present for all functions.
for func in funcs:
assert func in self.testInst.kwargs_supported
assert len(self.testInst.kwargs_supported[func]) > 0
# Confirm all user provided keywords are in the supported keywords
for func in funcs:
for kwarg in self.testInst.kwargs[func]:
assert kwarg in self.testInst.kwargs_supported[func]
return
# -------------------------------------------------------------------------
#
# Test basic data access features, both getting and setting data
#
# -------------------------------------------------------------------------
@pytest.mark.parametrize("labels", [('mlt'),
(['mlt', 'longitude']),
(['longitude', 'mlt'])])
def test_basic_data_access_by_name(self, labels):
"""Check that data can be accessed at the instrument level"""
self.testInst.load(self.ref_time.year, self.ref_doy)
assert np.all((self.testInst[labels]
== self.testInst.data[labels]).values)
@pytest.mark.parametrize("index", [(0),
([0, 1, 2, 3]),
(slice(0, 10)),
(np.arange(0, 10))])
def test_data_access_by_indices_and_name(self, index):
"""Check that variables and be accessed by each supported index type"""
self.testInst.load(self.ref_time.year, self.ref_doy)
assert np.all(self.testInst[index, 'mlt']
== self.testInst.data['mlt'][index])
def test_data_access_by_row_slicing_and_name_slicing(self):
"""Check that each variable is downsampled """
self.testInst.load(self.ref_time.year, self.ref_doy)
result = self.testInst[0:10, :]
for variable, array in result.items():
assert len(array) == len(self.testInst.data[variable].values[0:10])
assert np.all(array == self.testInst.data[variable].values[0:10])
def test_data_access_by_datetime_and_name(self):
"""Check that datetime can be used to access data"""
self.testInst.load(self.ref_time.year, self.ref_doy)
self.out = dt.datetime(2009, 1, 1, 0, 0, 0)
assert np.all(self.testInst[self.out, 'uts']
== self.testInst.data['uts'].values[0])
def test_data_access_by_datetime_slicing_and_name(self):
"""Check that a slice of datetimes can be used to access data"""
self.testInst.load(self.ref_time.year, self.ref_doy)
time_step = (self.testInst.index[1]
- self.testInst.index[0]).value / 1.E9
offset = dt.timedelta(seconds=(10 * time_step))
start = dt.datetime(2009, 1, 1, 0, 0, 0)
stop = start + offset
assert np.all(self.testInst[start:stop, 'uts']
== self.testInst.data['uts'].values[0:11])
def test_setting_data_by_name(self):
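        """Test setting a data variable by name."""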
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
def test_setting_series_data_by_name(self):
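        """Test setting data from a pandas Series, including an empty Series."""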
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = \
2. * pds.Series(self.testInst['mlt'].values,
index=self.testInst.index)
assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
self.testInst['blankMLT'] = pds.Series(None, dtype='float64')
assert np.all(np.isnan(self.testInst['blankMLT']))
def test_setting_pandas_dataframe_by_names(self):
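        """Test setting multiple variables at once from a pandas DataFrame."""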
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst[['doubleMLT', 'tripleMLT']] = \
pds.DataFrame({'doubleMLT': 2. * self.testInst['mlt'].values,
'tripleMLT': 3. * self.testInst['mlt'].values},
index=self.testInst.index)
assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
assert np.all(self.testInst['tripleMLT'] == 3. * self.testInst['mlt'])
def test_setting_data_by_name_single_element(self):
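        """Test setting a variable from a single broadcast value."""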
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = 2.
assert np.all(self.testInst['doubleMLT'] == 2.)
self.testInst['nanMLT'] = np.nan
assert np.all(np.isnan(self.testInst['nanMLT']))
def test_setting_data_by_name_with_meta(self):
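        """Test setting data together with its metadata via a dict."""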
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = {'data': 2. * self.testInst['mlt'],
'units': 'hours',
'long_name': 'double trouble'}
assert np.all(self.testInst['doubleMLT'] == 2. * self.testInst['mlt'])
assert self.testInst.meta['doubleMLT'].units == 'hours'
assert self.testInst.meta['doubleMLT'].long_name == 'double trouble'
def test_setting_partial_data(self):
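        """Test assigning to a subset of rows across all variables."""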
self.testInst.load(self.ref_time.year, self.ref_doy)
self.out = self.testInst
if self.testInst.pandas_format:
self.testInst[0:3] = 0
assert np.all(self.testInst[3:] == self.out[3:])
assert np.all(self.testInst[0:3] == 0)
else:
pytest.skip("This notation does not make sense for xarray")
@pytest.mark.parametrize("changed,fixed",
[(0, slice(1, None)),
([0, 1, 2, 3], slice(4, None)),
(slice(0, 10), slice(10, None)),
(np.array([0, 1, 2, 3]), slice(4, None)),
(dt.datetime(2009, 1, 1), slice(1, None)),
(slice(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 1, 0, 1)),
slice(dt.datetime(2009, 1, 1, 0, 1), None))])
def test_setting_partial_data_by_inputs(self, changed, fixed):
"""Check that data can be set using each supported input type"""
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
self.testInst[changed, 'doubleMLT'] = 0
        assert (self.testInst[fixed, 'doubleMLT']
                == 2. * self.testInst[fixed, 'mlt']).all()
        assert (self.testInst[changed, 'doubleMLT'] == 0).all()
def test_setting_partial_data_by_index_and_name(self):
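        """Test assigning to rows selected by index values for a named variable."""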
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
self.testInst[self.testInst.index[0:10], 'doubleMLT'] = 0
        assert (self.testInst[10:, 'doubleMLT']
                == 2. * self.testInst[10:, 'mlt']).all()
        assert (self.testInst[0:10, 'doubleMLT'] == 0).all()
def test_modifying_data_inplace(self):
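        """Test in-place modification of an existing data variable."""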
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst['doubleMLT'] = 2. * self.testInst['mlt']
self.testInst['doubleMLT'] += 100
        assert (self.testInst['doubleMLT']
                == 2. * self.testInst['mlt'] + 100).all()
def test_getting_all_data_by_index(self):
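        """Test retrieving all variables for a list of integer positions."""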
self.testInst.load(self.ref_time.year, self.ref_doy)
a = self.testInst[[0, 1, 2, 3, 4]]
if self.testInst.pandas_format:
assert len(a) == 5
else:
assert a.sizes[xarray_epoch_name] == 5
def test_getting_all_data_by_numpy_array_of_int(self):
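        """Test retrieving all variables for a numpy array of integer positions."""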
self.testInst.load(self.ref_time.year, self.ref_doy)
a = self.testInst[np.array([0, 1, 2, 3, 4])]
if self.testInst.pandas_format:
assert len(a) == 5
else:
assert a.sizes[xarray_epoch_name] == 5
# -------------------------------------------------------------------------
#
# Test variable renaming
#
# -------------------------------------------------------------------------
@pytest.mark.parametrize("values", [{'uts': 'uts1'},
{'uts': 'uts2',
'mlt': 'mlt2'},
{'uts': 'long change with spaces'}])
def test_basic_variable_renaming(self, values):
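        """Test renaming variables in both data and metadata."""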
# test single variable
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst.rename(values)
for key in values:
# check for new name
assert values[key] in self.testInst.data
assert values[key] in self.testInst.meta
# ensure old name not present
assert key not in self.testInst.data
assert key not in self.testInst.meta
@pytest.mark.parametrize("values", [{'help': 'I need somebody'},
{'UTS': 'litte_uts'},
{'utS': 'uts1'},
{'utS': 'uts'}])
def test_unknown_variable_error_renaming(self, values):
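        """Test that renaming an unknown variable raises ValueError."""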
# check for error for unknown variable name
self.testInst.load(self.ref_time.year, self.ref_doy)
with pytest.raises(ValueError):
self.testInst.rename(values)
@pytest.mark.parametrize("values", [{'uts': 'UTS1'},
{'uts': 'UTs2',
'mlt': 'Mlt2'},
{'uts': 'Long Change with spaces'}])
def test_basic_variable_renaming_lowercase(self, values):
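        """Test renaming with lowercase_data_labels, keeping the given case in meta."""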
# test single variable
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst.rename(values, lowercase_data_labels=True)
for key in values:
# check for new name
assert values[key].lower() in self.testInst.data
assert values[key].lower() in self.testInst.meta
# ensure case retained in meta
assert values[key] == self.testInst.meta[values[key]].name
# ensure old name not present
assert key not in self.testInst.data
assert key not in self.testInst.meta
@pytest.mark.parametrize("values", [{'profiles': {'density': 'ionization'}},
{'profiles': {'density': 'mass'},
'alt_profiles':
{'density': 'volume'}}])
def test_ho_pandas_variable_renaming(self, values):
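        """Test renaming variables nested inside higher-order pandas data."""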
# check for pysat_testing2d instrument
if self.testInst.platform == 'pysat':
if self.testInst.name == 'testing2d':
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst.rename(values)
for key in values:
for ikey in values[key]:
# check column name unchanged
assert key in self.testInst.data
assert key in self.testInst.meta
# check for new name in HO data
assert values[key][ikey] in self.testInst[0, key]
check_var = self.testInst.meta[key]['children']
assert values[key][ikey] in check_var
# ensure old name not present
assert ikey not in self.testInst[0, key]
check_var = self.testInst.meta[key]['children']
assert ikey not in check_var
@pytest.mark.parametrize("values", [{'profiles':
{'help': 'I need somebody'}},
{'fake_profi':
{'help': 'Not just anybody'}},
{'wrong_profile':
{'help': 'You know I need someone'},
'fake_profiles':
{'Beatles': 'help!'},
'profiles':
{'density': 'valid_change'}},
{'fake_profile':
{'density': 'valid HO change'}},
{'Nope_profiles':
{'density': 'valid_HO_change'}}])
def test_ho_pandas_unknown_variable_error_renaming(self, values):
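        """Test that renaming unknown higher-order variables raises ValueError."""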
# check for pysat_testing2d instrument
if self.testInst.platform == 'pysat':
if self.testInst.name == 'testing2d':
self.testInst.load(self.ref_time.year, self.ref_doy)
# check for error for unknown column or HO variable name
with pytest.raises(ValueError):
self.testInst.rename(values)
@pytest.mark.parametrize("values", [{'profiles': {'density': 'Ionization'}},
{'profiles': {'density': 'MASa'},
'alt_profiles':
{'density': 'VoLuMe'}}])
def test_ho_pandas_variable_renaming_lowercase(self, values):
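        """Test renaming higher-order variables with mixed-case new names."""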
# check for pysat_testing2d instrument
if self.testInst.platform == 'pysat':
if self.testInst.name == 'testing2d':
self.testInst.load(self.ref_time.year, self.ref_doy)
self.testInst.rename(values)
for key in values:
for ikey in values[key]:
# check column name unchanged
assert key in self.testInst.data
assert key in self.testInst.meta
# check for new name in HO data
test_val = values[key][ikey]
assert test_val in self.testInst[0, key]
check_var = self.testInst.meta[key]['children']
# case insensitive check
assert values[key][ikey] in check_var
# ensure new case in there
check_var = check_var[values[key][ikey]].name
assert values[key][ikey] == check_var
# ensure old name not present
assert ikey not in self.testInst[0, key]
check_var = self.testInst.meta[key]['children']
assert ikey not in check_var
# -------------------------------------------------------------------------
#
# Test iteration behaviors
#
# -------------------------------------------------------------------------
def test_list_comprehension(self):
"""Test list comprehensions for length, uniqueness, post iteration data
"""
self.testInst.bounds = (self.testInst.files.files.index[0],
self.testInst.files.files.index[9])
# ensure no data to begin
assert self.testInst.empty
# perform comprehension and ensure there are as many as there should be
insts = [inst for inst in self.testInst]
assert len(insts) == 10
# get list of dates
dates = pds.Series([inst.date for inst in insts])
assert dates.is_monotonic_increasing
# dates are unique
assert np.all(np.unique(dates) == dates.values)
# iteration instruments are not the same as original
for inst in insts:
            assert inst is not self.testInst
# check there is data after iteration
assert not self.testInst.empty
return
def test_left_bounds_with_prev(self):
"""Test if passing bounds raises StopIteration."""
# load first data
self.testInst.next()
with pytest.raises(StopIteration):
# go back to no data
self.testInst.prev()
def test_right_bounds_with_next(self):
"""Test if passing bounds raises StopIteration."""
# load last data
self.testInst.prev()
with pytest.raises(StopIteration):
# move on to future data that doesn't exist
self.testInst.next()
def test_set_bounds_with_frequency(self):
"""Test setting bounds with non-default step"""
start = self.ref_time
stop = self.ref_time + dt.timedelta(days=14)
self.testInst.bounds = (start, stop, 'M')
assert np.all(self.testInst._iter_list
== pds.date_range(start, stop, freq='M').tolist())
def test_iterate_bounds_with_frequency(self):
"""Test iterating bounds with non-default step"""
start = self.ref_time
stop = self.ref_time + dt.timedelta(days=15)
self.testInst.bounds = (start, stop, '2D')
dates = []
for inst in self.testInst:
dates.append(inst.date)
out = pds.date_range(start, stop, freq='2D').tolist()
assert np.all(dates == out)
def test_set_bounds_with_frequency_and_width(self):
"""Set date bounds with step/width>1"""
start = self.ref_time
stop = self.ref_time + pds.DateOffset(months=11, days=25)
stop = stop.to_pydatetime()
self.testInst.bounds = (start, stop, '10D', dt.timedelta(days=10))
assert np.all(self.testInst._iter_list
== pds.date_range(start, stop, freq='10D').tolist())
def verify_inclusive_iteration(self, out, forward=True):
"""Verify loaded dates for inclusive iteration, forward or backward"""
if forward:
# verify range of loaded data when iterating forward
for i, trange in enumerate(out['observed_times']):
# determine which range we are in
b_range = 0
while out['expected_times'][i] > out['stops'][b_range]:
b_range += 1
# check loaded range is correct
assert trange[0] == out['expected_times'][i]
check = out['expected_times'][i] + out['width']
check -= dt.timedelta(days=1)
assert trange[1] > check
check = out['stops'][b_range] + dt.timedelta(days=1)
assert trange[1] < check
else:
# verify range of loaded data when going backwards
for i, trange in enumerate(out['observed_times']):
# determine which range we are in
b_range = 0
while out['expected_times'][i] > out['stops'][b_range]:
b_range += 1
# check start against expectations
assert trange[0] == out['expected_times'][i]
# check end against expectations
check = out['expected_times'][i] + out['width']
check -= dt.timedelta(days=1)
assert trange[1] > check
check = out['stops'][b_range] + dt.timedelta(days=1)
assert trange[1] < check
if i == 0:
# check first load is before end of bounds
check = out['stops'][b_range] - out['width']
check += dt.timedelta(days=1)
assert trange[0] == check
assert trange[1] > out['stops'][b_range]
check = out['stops'][b_range] + dt.timedelta(days=1)
assert trange[1] < check
elif i == len(out['observed_times']) - 1:
# last load at start of bounds
assert trange[0] == out['starts'][b_range]
assert trange[1] > out['starts'][b_range]
assert trange[1] < out['starts'][b_range] + out['width']
return
def verify_exclusive_iteration(self, out, forward=True):
"""Verify loaded dates for exclusive iteration, forward or backward"""
# verify range of loaded data
if forward:
for i, trange in enumerate(out['observed_times']):
# determine which range we are in
b_range = 0
while out['expected_times'][i] > out['stops'][b_range]:
b_range += 1
# check loaded range is correct
assert trange[0] == out['expected_times'][i]
check = out['expected_times'][i] + out['width']
check -= dt.timedelta(days=1)
assert trange[1] > check
assert trange[1] < out['stops'][b_range]
else:
for i, trange in enumerate(out['observed_times']):
# determine which range we are in
b_range = 0
while out['expected_times'][i] > out['stops'][b_range]:
b_range += 1
# check start against expectations
assert trange[0] == out['expected_times'][i]
# check end against expectations
check = out['expected_times'][i] + out['width']
check -= dt.timedelta(days=1)
assert trange[1] > check
check = out['stops'][b_range] + dt.timedelta(days=1)
assert trange[1] < check
if i == 0:
# check first load is before end of bounds
check = out['stops'][b_range] - out['width']
check += dt.timedelta(days=1)
assert trange[0] < check
assert trange[1] < out['stops'][b_range]
elif i == len(out['observed_times']) - 1:
# last load at start of bounds
assert trange[0] == out['starts'][b_range]
assert trange[1] > out['starts'][b_range]
assert trange[1] < out['starts'][b_range] + out['width']
return
@pytest.mark.parametrize("values", [(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 3), '2D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 4), '2D',
dt.timedelta(days=3)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 5), '3D',
dt.timedelta(days=1)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 17), '5D',
dt.timedelta(days=1))
])
def test_iterate_bounds_with_frequency_and_width(self, values):
"""Iterate via date with mixed step/width, excludes stop date"""
out = self.support_iter_evaluations(values, for_loop=True)
# verify range of loaded data
self.verify_exclusive_iteration(out, forward=True)
return
@pytest.mark.parametrize("values", [(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 4), '2D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 4), '3D',
dt.timedelta(days=1)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 4), '1D',
dt.timedelta(days=4)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 5), '4D',
dt.timedelta(days=1)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 5), '2D',
dt.timedelta(days=3)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 5), '3D',
dt.timedelta(days=2))])
def test_iterate_bounds_with_frequency_and_width_incl(self, values):
"""Iterate via date with mixed step/width, includes stop date"""
out = self.support_iter_evaluations(values, for_loop=True)
# verify range of loaded data
self.verify_inclusive_iteration(out, forward=True)
return
@pytest.mark.parametrize("values", [(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10), '2D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 9), '4D',
dt.timedelta(days=1)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 11), '1D',
dt.timedelta(days=3)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 11), '1D',
dt.timedelta(days=11)),
])
def test_next_date_with_frequency_and_width_incl(self, values):
"""Test .next() via date step/width>1, includes stop date"""
out = self.support_iter_evaluations(values)
# verify range of loaded data
self.verify_inclusive_iteration(out, forward=True)
return
@pytest.mark.parametrize("values", [(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 11), '2D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 12), '2D',
dt.timedelta(days=3)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 13), '3D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 3), '4D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 12), '2D',
dt.timedelta(days=1))])
def test_next_date_with_frequency_and_width(self, values):
"""Test .next() via date step/width>1, excludes stop date"""
out = self.support_iter_evaluations(values)
# verify range of loaded data
self.verify_exclusive_iteration(out, forward=True)
return
@pytest.mark.parametrize("values", [((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 4),
dt.datetime(2009, 1, 13)),
'2D',
dt.timedelta(days=2)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 7),
dt.datetime(2009, 1, 16)),
'3D',
dt.timedelta(days=1)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 6),
dt.datetime(2009, 1, 15)),
'2D',
dt.timedelta(days=4))
])
def test_next_date_season_frequency_and_width_incl(self, values):
"""Test .next() via date season step/width>1, includes stop date"""
out = self.support_iter_evaluations(values)
# verify range of loaded data
self.verify_inclusive_iteration(out, forward=True)
return
@pytest.mark.parametrize("values", [((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 3),
dt.datetime(2009, 1, 12)),
'2D',
dt.timedelta(days=2)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 6),
dt.datetime(2009, 1, 15)),
'3D',
dt.timedelta(days=1)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 7),
dt.datetime(2009, 1, 16)),
'2D',
dt.timedelta(days=4))
])
def test_next_date_season_frequency_and_width(self, values):
"""Test .next() via date season step/width>1, excludes stop date"""
out = self.support_iter_evaluations(values)
# verify range of loaded data
self.verify_exclusive_iteration(out, forward=True)
return
@pytest.mark.parametrize("values", [(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10), '2D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 9), '4D',
dt.timedelta(days=1)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 11), '1D',
dt.timedelta(days=3)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 11), '1D',
dt.timedelta(days=11)),
])
def test_prev_date_with_frequency_and_width_incl(self, values):
"""Test .prev() via date step/width>1, includes stop date"""
out = self.support_iter_evaluations(values, reverse=True)
# verify range of loaded data
self.verify_inclusive_iteration(out, forward=False)
return
@pytest.mark.parametrize("values", [(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 11), '2D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 12), '2D',
dt.timedelta(days=3)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 13), '3D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 3), '4D',
dt.timedelta(days=2)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 12), '2D',
dt.timedelta(days=1))])
def test_prev_date_with_frequency_and_width(self, values):
"""Test .prev() via date step/width>1, excludes stop date"""
out = self.support_iter_evaluations(values, reverse=True)
# verify range of loaded data
self.verify_exclusive_iteration(out, forward=False)
return
@pytest.mark.parametrize("values", [((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 4),
dt.datetime(2009, 1, 13)),
'2D',
dt.timedelta(days=2)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 7),
dt.datetime(2009, 1, 16)),
'3D',
dt.timedelta(days=1)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 6),
dt.datetime(2009, 1, 15)),
'2D',
dt.timedelta(days=4))
])
def test_prev_date_season_frequency_and_width_incl(self, values):
"""Test .prev() via date season step/width>1, includes stop date"""
out = self.support_iter_evaluations(values, reverse=True)
# verify range of loaded data
self.verify_inclusive_iteration(out, forward=False)
return
@pytest.mark.parametrize("values", [((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 3),
dt.datetime(2009, 1, 12)),
'2D',
dt.timedelta(days=2)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 6),
dt.datetime(2009, 1, 15)),
'3D',
dt.timedelta(days=1)),
((dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 10)),
(dt.datetime(2009, 1, 7),
dt.datetime(2009, 1, 16)),
'2D',
dt.timedelta(days=4))
])
def test_prev_date_season_frequency_and_width(self, values):
"""Test .prev() via date season step/width>1, excludes stop date"""
out = self.support_iter_evaluations(values, reverse=True)
# verify range of loaded data
self.verify_exclusive_iteration(out, forward=False)
return
def test_set_bounds_too_few(self):
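        """Test error when too few inputs are assigned to bounds."""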
start = dt.datetime(2009, 1, 1)
with pytest.raises(ValueError):
self.testInst.bounds = [start]
def test_set_bounds_mixed(self):
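        """Test error when bounds mix a date with a filename."""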
start = dt.datetime(2009, 1, 1)
with pytest.raises(ValueError):
self.testInst.bounds = [start, '2009-01-01.nofile']
def test_set_bounds_wrong_type(self):
"""Test Exception when setting bounds with inconsistent types"""
start = dt.datetime(2009, 1, 1)
with pytest.raises(ValueError):
self.testInst.bounds = [start, 1]
def test_set_bounds_mixed_iterable(self):
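        """Test error when bounds mix an iterable of dates with a filename."""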
start = [dt.datetime(2009, 1, 1)] * 2
with pytest.raises(ValueError):
self.testInst.bounds = [start, '2009-01-01.nofile']
    def test_set_bounds_mixed_iterables(self):
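        """Test error when a bound iterable mixes dates and filenames."""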
start = [dt.datetime(2009, 1, 1)] * 2
with pytest.raises(ValueError):
self.testInst.bounds = [start, [dt.datetime(2009, 1, 1),
'2009-01-01.nofile']]
def test_set_bounds_string_default_start(self):
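        """Test file bounds when the start is left as None."""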
self.testInst.bounds = [None, '2009-01-01.nofile']
assert self.testInst.bounds[0][0] == self.testInst.files[0]
def test_set_bounds_string_default_end(self):
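        """Test file bounds when the end is left as None."""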
self.testInst.bounds = ['2009-01-01.nofile', None]
assert self.testInst.bounds[1][0] == self.testInst.files[-1]
def test_set_bounds_too_many(self):
"""Ensure error if too many inputs to inst.bounds"""
start = dt.datetime(2009, 1, 1)
stop = dt.datetime(2009, 1, 1)
width = dt.timedelta(days=1)
with pytest.raises(ValueError) as err:
self.testInst.bounds = [start, stop, '1D', width, False]
estr = 'Too many input arguments.'
assert str(err).find(estr) >= 0
def test_set_bounds_by_date(self):
"""Test setting bounds with datetimes over simple range"""
start = dt.datetime(2009, 1, 1)
stop = dt.datetime(2009, 1, 15)
self.testInst.bounds = (start, stop)
assert np.all(self.testInst._iter_list
== pds.date_range(start, stop).tolist())
def test_set_bounds_by_date_wrong_order(self):
"""Test error if bounds assignment has stop date before start"""
start = dt.datetime(2009, 1, 15)
stop = dt.datetime(2009, 1, 1)
with pytest.raises(Exception) as err:
self.testInst.bounds = (start, stop)
estr = 'Bounds must be set in increasing'
assert str(err).find(estr) >= 0
def test_set_bounds_by_default_dates(self):
"""Verify bounds behavior with default date related inputs"""
start = self.testInst.files.start_date
stop = self.testInst.files.stop_date
full_list = pds.date_range(start, stop).tolist()
self.testInst.bounds = (None, None)
assert np.all(self.testInst._iter_list == full_list)
self.testInst.bounds = None
assert np.all(self.testInst._iter_list == full_list)
self.testInst.bounds = (start, None)
assert np.all(self.testInst._iter_list == full_list)
self.testInst.bounds = (None, stop)
assert np.all(self.testInst._iter_list == full_list)
def test_set_bounds_by_date_extra_time(self):
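        """Test that time-of-day is stripped when setting date bounds."""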
start = dt.datetime(2009, 1, 1, 1, 10)
stop = dt.datetime(2009, 1, 15, 1, 10)
self.testInst.bounds = (start, stop)
start = filter_datetime_input(start)
stop = filter_datetime_input(stop)
assert np.all(self.testInst._iter_list
== pds.date_range(start, stop).tolist())
@pytest.mark.parametrize("start,stop", [(dt.datetime(2010, 12, 1),
dt.datetime(2010, 12, 31)),
(dt.datetime(2009, 1, 1),
dt.datetime(2009, 1, 15))
])
def test_iterate_over_bounds_set_by_date(self, start, stop):
"""Test iterating over bounds via single date range"""
self.testInst.bounds = (start, stop)
dates = []
for inst in self.testInst:
dates.append(inst.date)
out = pds.date_range(start, stop).tolist()
assert np.all(dates == out)
def test_iterate_over_default_bounds(self):
"""Test iterating over default bounds"""
date_range = pds.date_range(self.ref_time,
self.ref_time + dt.timedelta(days=10))
self.testInst.kwargs['list_files']['file_date_range'] = date_range
self.testInst.files.refresh()
self.testInst.bounds = (None, None)
dates = []
for inst in self.testInst:
dates.append(inst.date)
out = date_range.tolist()
assert np.all(dates == out)
def test_set_bounds_by_date_season(self):
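        """Test setting bounds from lists of start and stop dates (a season)."""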
start = [dt.datetime(2009, 1, 1), dt.datetime(2009, 2, 1)]
stop = [dt.datetime(2009, 1, 15), dt.datetime(2009, 2, 15)]
self.testInst.bounds = (start, stop)
out = | pds.date_range(start[0], stop[0]) | pandas.date_range |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 00:30, 01/06/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from pandas import read_csv
from config import Config
from pandas import DataFrame
def save_fast_to_csv(list_results, list_paths, columns):
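    """Write each result set to the matching path in list_paths as a CSV with the given columns."""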
for idx, results in enumerate(list_results):
df = DataFrame(results, columns=columns)
df.to_csv(list_paths[idx], index=False)
return True
PROBLEM_SIZE = 30
LIST_FUNCTIONS = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20"]
LIST_MHAS = ["GA", "SAP_DE", "WOA", "COA", "HGS", "LCBO", "CHIO", "SLO", "ImprovedSLO"]
LIST_NAMES = ["GA", "SAP-DE", "HI-WOA", "COA", "HGS", "M-LCO","CHIO", "SLO", "ISLO"]
final_results = []
for func_name in LIST_FUNCTIONS:
for idx, mha in enumerate(LIST_MHAS):
filesave = f"{Config.BENCHMARK_RESULTS}/statistics.csv"
df = | read_csv(f"{Config.BENCHMARK_BEST_FIT}/{PROBLEM_SIZE}D_{mha}_best_fit.csv", usecols=["function", "time", "trial", "fit"]) | pandas.read_csv |
#!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import json
import numpy as np
import os
import pandas as pd
import pickle
import sys
import tensorflow as tf
from itertools import compress
from keras import backend as keras_backend
from thesis.classification.semisupervised import ActiveLearningWrapper
from thesis.dataset import SenseCorpusDatasets, UnlabeledCorpusDataset
from thesis.dataset.utils import NotEnoughSensesError
from thesis.utils import try_number
from thesis.constants import CLASSIFIERS
from tqdm import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('labeled_dataset_path')
parser.add_argument('base_results_path')
parser.add_argument('--unlabeled_dataset_path', default=None)
parser.add_argument('--full_senses_path', default=None)
parser.add_argument('--sentences_path', default=None)
parser.add_argument('--max_annotations', type=int, default=0)
parser.add_argument('--simulation_indices_path', default=None)
parser.add_argument('--word_vector_model_path', default=None)
parser.add_argument('--labeled_dataset_extra', default=None)
parser.add_argument('--unlabeled_dataset_extra', default=None)
parser.add_argument('--classifier', type=str, default='svm')
parser.add_argument('--classifier_config_file', type=str, default=None)
parser.add_argument('--classifier_config', type=lambda config: tuple(config.split('=')),
default=list(), nargs='+')
parser.add_argument('--layers', type=int, nargs='+', default=list())
parser.add_argument('--unlabeled_data_limit', type=int, default=1000)
parser.add_argument('--candidates_limit', type=int, default=0)
parser.add_argument('--min_count', type=int, default=2)
parser.add_argument('--validation_ratio', type=float, default=0.2)
parser.add_argument('--candidates_selection', default='min')
parser.add_argument('--error_sigma', type=float, default=0.1)
parser.add_argument('--random_seed', type=int, default=1234)
parser.add_argument('--folds', type=int, default=0)
parser.add_argument('--annotation_lemmas', nargs='+', default=set())
parser.add_argument('--corpus_name', default='NA')
parser.add_argument('--representation', default='NA')
parser.add_argument('--vector_domain', default='NA')
args = parser.parse_args()
if (args.unlabeled_dataset_path is None) == (args.simulation_indices_path is None):
print('Either give an unlabeled dataset path or a simulation indices path', file=sys.stderr)
sys.exit(1)
if args.classifier_config_file is not None:
with open(args.classifier_config_file, 'r') as fconfig:
config = json.load(fconfig)
else:
config = {}
args.classifier_config = [args.classifier_config] \
if isinstance(args.classifier_config, tuple) else args.classifier_config
if args.annotation_lemmas:
args.annotation_lemmas = set(args.annotation_lemmas) if not isinstance(args.annotation_lemmas, set) else\
args.annotation_lemmas
for key, value in args.classifier_config:
config[key] = try_number(value)
if args.classifier == 'svm':
config['kernel'] = 'linear'
config['probability'] = True
if args.layers:
args.layers = [args.layers] if isinstance(args.layers, int) else args.layers
config['layers'] = args.layers
labeled_datasets_path = os.path.join(args.labeled_dataset_path, '%s_dataset.npz')
labeled_features_path = os.path.join(args.labeled_dataset_path, '%s_features.p')
labeled_datasets_extra_path = os.path.join(args.labeled_dataset_extra, '%s_dataset.npz') \
if args.labeled_dataset_extra is not None else None
print('Loading labeled dataset', file=sys.stderr)
labeled_datasets = SenseCorpusDatasets(train_dataset_path=labeled_datasets_path % 'train',
train_features_dict_path=labeled_features_path % 'train'
if args.word_vector_model_path is None else None,
test_dataset_path=labeled_datasets_path % 'test',
test_features_dict_path=labeled_features_path % 'test'
if args.word_vector_model_path is None else None,
word_vector_model_path=args.word_vector_model_path,
train_dataset_extra=labeled_datasets_extra_path % 'train'
if labeled_datasets_extra_path is not None else None,
test_dataset_extra=labeled_datasets_extra_path % 'test'
if labeled_datasets_extra_path is not None else None)
full_senses_dict = None
if args.unlabeled_dataset_path:
unlabeled_dataset_path = os.path.join(args.unlabeled_dataset_path, 'dataset.npz')
unlabeled_features_path = os.path.join(args.unlabeled_dataset_path, 'features.p')
unlabeled_dataset_extra_path = os.path.join(args.unlabeled_dataset_extra, 'dataset.npz') \
if args.unlabeled_dataset_extra is not None else None
print('Loading unlabeled dataset', file=sys.stderr)
unlabeled_dataset = UnlabeledCorpusDataset(dataset_path=unlabeled_dataset_path,
features_dict_path=unlabeled_features_path
if args.word_vector_model_path is None else None,
word_vector_model=labeled_datasets.train_dataset.word_vector_model,
dataset_extra=unlabeled_dataset_extra_path)
initial_indices = None
unlabeled_indices = None
else:
simulation_indices = np.load(args.simulation_indices_path)
initial_indices = simulation_indices['initial_indices']
unlabeled_indices = simulation_indices['unlabeled_indices']
unlabeled_dataset = None
if args.full_senses_path is not None:
with open(args.full_senses_path, 'rb') as f:
full_senses_dict = pickle.load(f)
prediction_results = []
certainty_progression = []
features_progression = []
cross_validation_results = []
overfitting_measure_results = []
senses = []
results = (prediction_results, certainty_progression, features_progression,
cross_validation_results, overfitting_measure_results)
bootstrapped_instances = []
bootstrapped_targets = []
text_sentences = {}
if args.sentences_path is not None:
with open(args.sentences_path, 'r') as sfin:
for line in sfin:
iid, sent = line.strip().split('\t', 1)
text_sentences[iid] = sent
print('Running experiments per lemma', file=sys.stderr)
for lemma, data, target, features in \
tqdm(labeled_datasets.train_dataset.traverse_dataset_by_lemma(return_features=True),
total=labeled_datasets.train_dataset.num_lemmas):
if unlabeled_dataset and not unlabeled_dataset.has_lemma(lemma):
continue
if args.annotation_lemmas and lemma not in args.annotation_lemmas:
continue
try:
tf.reset_default_graph()
with tf.Session() as sess:
keras_backend.set_session(sess)
if unlabeled_dataset is not None:
unlabeled_data = unlabeled_dataset.data(lemma, limit=args.unlabeled_data_limit)
unlabeled_target = np.array([])
unlabeled_features = unlabeled_dataset.features_dictionaries(lemma, limit=args.unlabeled_data_limit)
instances_id = unlabeled_dataset.instances_id(lemma, limit=args.unlabeled_data_limit)
lemma_unlabeled_sentences = [text_sentences[':'.join(iid)] for iid in instances_id]
else:
li = np.in1d(labeled_datasets.train_dataset.lemmas_index(lemma), initial_indices)
ui = np.in1d(labeled_datasets.train_dataset.lemmas_index(lemma), unlabeled_indices)
unlabeled_data = data[ui]
unlabeled_target = target[ui]
unlabeled_features = list(compress(features, ui))
data = data[li]
target = target[li]
features = list(compress(features, li))
lemma_unlabeled_sentences = None
test_data = labeled_datasets.test_dataset.data(lemma)
test_target = labeled_datasets.test_dataset.target(lemma)
_, zero_based_indices = np.unique(np.concatenate([target, unlabeled_target, test_target]),
return_inverse=True)
train_classes = {label: idx for idx, label in
enumerate(labeled_datasets.train_dataset.train_classes(lemma))}
train_size = target.shape[0]
unlabeled_size = unlabeled_target.shape[0]
test_size = test_target.shape[0]
target = zero_based_indices[:train_size]
unlabeled_target = zero_based_indices[train_size:train_size+unlabeled_size]
test_target = zero_based_indices[train_size+unlabeled_size:]
semisupervised = ActiveLearningWrapper(
labeled_train_data=data, labeled_train_target=target, labeled_test_data=test_data,
labeled_test_target=test_target, unlabeled_data=unlabeled_data, unlabeled_target=unlabeled_target,
lemma=lemma, labeled_features=features, min_count=args.min_count, full_senses_dict=full_senses_dict,
validation_ratio=args.validation_ratio, candidates_selection=args.candidates_selection,
candidates_limit=args.candidates_limit, unlabeled_features=unlabeled_features,
error_sigma=args.error_sigma, folds=args.folds, random_seed=args.random_seed,
acceptance_threshold=0, unlabeled_sentences=lemma_unlabeled_sentences, train_classes=train_classes,
max_annotations=args.max_annotations
)
iterations = semisupervised.run(CLASSIFIERS[args.classifier], config)
if iterations > 0:
for rst_agg, rst in zip(results, semisupervised.get_results()):
if rst is not None:
rst.insert(0, 'num_classes', semisupervised.classes.shape[0])
rst.insert(0, 'lemma', lemma)
rst.insert(0, 'candidates_selection', args.candidates_selection)
rst.insert(0, 'layers', '_'.join(str(l) for l in args.layers) if args.layers else 'NA')
rst.insert(0, 'classifier', args.classifier)
rst.insert(0, 'algorithm', 'active_learning')
rst.insert(0, 'vector_domain', args.vector_domain or 'NA')
rst.insert(0, 'representation', args.representation or 'NA')
rst.insert(0, 'corpus', args.corpus_name)
rst_agg.append(rst)
if args.full_senses_path is not None:
senses.append(semisupervised.get_senses())
# Save the bootstrapped data (if there is unlabeled data to save)
if unlabeled_dataset is not None:
bi, bt = semisupervised.bootstrapped()
bootstrapped_targets.extend(bt)
ul_instances = unlabeled_dataset.instances_id(lemma, limit=args.unlabeled_data_limit)
bootstrapped_instances.extend(':'.join(ul_instances[idx]) for idx in bi)
else:
tqdm.write('Lemma %s - No iterations' % lemma, file=sys.stderr)
except NotEnoughSensesError:
tqdm.write('Lemma %s - Not enough senses with at least %d occurrences'
% (lemma, args.min_count), file=sys.stderr)
continue
print('Saving results', file=sys.stderr)
try:
pd.DataFrame({'instance': bootstrapped_instances, 'predicted_target': bootstrapped_targets}) \
.to_csv('%s_unlabeled_dataset_predictions.csv' % args.base_results_path, index=False, float_format='%.2e')
except (ValueError, MemoryError) as e:
print(e.args, file=sys.stderr)
try:
pd.concat(prediction_results, ignore_index=True) \
.to_csv('%s_prediction_results.csv' % args.base_results_path, index=False, float_format='%.2e')
except (ValueError, MemoryError) as e:
print(e.args, file=sys.stderr)
try:
pd.concat(certainty_progression, ignore_index=True) \
.to_csv('%s_certainty_progression.csv' % args.base_results_path, index=False, float_format='%.2e')
except (ValueError, MemoryError) as e:
print(e.args, file=sys.stderr)
try:
        pd.concat(features_progression, ignore_index=True) \
            .to_csv('%s_features_progression.csv' % args.base_results_path, index=False, float_format='%.2e')
    except (ValueError, MemoryError) as e:
        print(e.args, file=sys.stderr)
import pandas as pd
import numpy as np
import random
import math
'''
Allocate nodes for each update in different scenarios
'''
# uniform allocation, i.e., scenario 1
def uniform_alloc(data, random_seed, available_num):
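    """Scenario 1: uniformly pick available_num of the six nodes (a-f) for every update."""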
random.seed(random_seed)
nodes = []
for i in range(data.shape[0]):
nodes.append("".join(random.sample(['a', 'b', 'c', 'd', 'e', 'f'], available_num))) # available nodes per update
return pd.DataFrame(nodes)
# exponential distribution
def exponential_alloc(data, lamb, random_seed):
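    """Pick 3 nodes per update using normalized exponential weights lamb*exp(-lamb*k) over nodes a-f."""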
    random.seed(random_seed)
    # np.random.choice below draws from numpy's RNG, so seed it as well for reproducibility
    np.random.seed(random_seed)
p1, p2, p3, p4, p5, p6 = lamb * math.exp(-lamb * 1), lamb * math.exp(-lamb * 2), lamb * math.exp(
-lamb * 3), lamb * math.exp(-lamb * 4), lamb * math.exp(-lamb * 5), lamb * math.exp(-lamb * 6)
p_sum = p1 + p2 + p3 + p4 + p5 + p6
p1, p2, p3, p4, p5, p6 = p1 / p_sum, p2 / p_sum, p3 / p_sum, p4 / p_sum, p5 / p_sum, p6 / p_sum
nodes = []
probs = [p1, p2, p3, p4, p5, p6]
for i in range(data.shape[0]):
nodes.append("".join(np.random.choice(['a', 'b', 'c', 'd', 'e', 'f'], 3, p=probs, replace=False).tolist()))
return pd.DataFrame(nodes)
# poisson distribution
def poi(k, lamb):
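    """Poisson probability mass function: P(X = k) = lamb**k * exp(-lamb) / k!."""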
return (lamb ** k) * math.exp(-lamb) / math.factorial(k)
def poisson_alloc(data, lamb, random_seed):
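    """Pick a Poisson-distributed number of nodes (1-5, truncated and renormalized) for every update."""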
random.seed(random_seed)
p1, p2, p3, p4, p5 = poi(1, lamb), poi(2, lamb), poi(3, lamb), poi(4, lamb), poi(5, lamb)
p_sum = p1 + p2 + p3 + p4 + p5
p1, p2, p3, p4, p5 = p1 / p_sum, p2 / p_sum, p3 / p_sum, p4 / p_sum, p5 / p_sum
nodes = []
for i in range(data.shape[0]):
tempnodes = random.sample(['a', 'b', 'c', 'd', 'e', 'f'], 5)
rand = random.random()
if rand < p1:
num = 1
elif rand < p1 + p2:
num = 2
elif rand < p1 + p2 + p3:
num = 3
elif rand < p1 + p2 + p3 + p4:
num = 4
else:
num = 5
tempnodes = random.sample(tempnodes, num)
nodes.append("".join(tempnodes))
return pd.DataFrame(nodes)
# partition & failure scenario
def available_nodes_alloc(available_nodes, random_seed):
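    """Pick 3 nodes when all six are available, otherwise 1 of the remaining nodes, for each update."""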
random.seed(random_seed)
nodes = []
for available_node in available_nodes:
if len(available_node) == 6:
tempnodes = random.sample(available_node, 3)
else:
tempnodes = random.sample(available_node, 1)
nodes.append("".join(tempnodes))
return pd.DataFrame(nodes)
# for **uniform, poisson, exponential** to generate current_times and test_times
def test_set_gen(test_set, test_size, random_seed):
random.seed(random_seed)
current_times = []
test_times = []
for i in range(test_set.shape[0] - 1):
current_times.append([test_set.iloc[i, 0], test_set.iloc[i + 1, 0], test_set.iloc[i, 1]])
for current_time in current_times:
test_time = random.sample(range(int(current_time[0]), int(current_time[1])), 1)[0]
test_times.append(test_time)
if len(test_times) >= test_size:
break
return pd.DataFrame(current_times[0:test_size]), pd.DataFrame(test_times[0:test_size])
# sample times for scenarios
def sample_scenario_period(update_times, scenario_num, length):
scenario_periods = []
total_times = list(range(int(update_times[0]), int(update_times[len(update_times) - 1])))
for i in range(scenario_num):
# print('unavailable_times:', len(unavailable_times))
scenario_time = random.sample(total_times, 1)
for timepoint in scenario_time:
for time in range(timepoint - length, timepoint + length):
if time in total_times:
total_times.remove(time)
scenario_periods.append(time)
return scenario_periods
# frequency changing scenario
def frequency_test_set_gen(random_seed, test_set, test_size, scenario_num, length, frequency_normal, frequency_low):
random.seed(random_seed)
current_times = []
test_times = []
frequency_bit = 0
for i in range(test_set.shape[0] - 1):
if frequency_bit % 3 == 0: # high frequency
current_times.append([test_set.iloc[i, 0], test_set.iloc[i + 1, 0], test_set.iloc[i, 1]])
elif frequency_bit % 3 == 1: # normal scenario
if i % frequency_normal == 0 or i % frequency_bit == int(frequency_normal / 2):
current_times.append([test_set.iloc[i, 0], test_set.iloc[i + 1, 0], test_set.iloc[i, 1]])
else: # low frequency
if i % frequency_low == 0 or i % frequency_bit == int(frequency_low / 2):
current_times.append([test_set.iloc[i, 0], test_set.iloc[i + 1, 0], test_set.iloc[i, 1]])
if i % length == 0:
frequency_bit += 1
if (frequency_bit * 2 / 3) >= scenario_num:
frequency_bit = 1
for current_time in current_times:
test_time = random.sample(range(int(current_time[0]), int(current_time[1])), 1)[0]
test_times.append(test_time)
if len(test_times) >= test_size:
break
test_times = pd.DataFrame(test_times)
# print('len(test_times):', len(test_times))
test_times = pd.concat([test_times, uniform_alloc(test_times, random_seed, 3)], axis=1).dropna()
return pd.DataFrame(current_times[0:test_size]), pd.DataFrame(test_times[0:test_size])
# for node failure scenario to generate available nodes for each update
def failure_nodes_gen(random_seed, fail_num, length, update_times):
random.seed(random_seed)
available_nodes_list = ['a', 'b', 'c', 'd', 'e', 'f']
unavailable_times = sample_scenario_period(update_times, fail_num, length)
# select which 3 nodes to disable for each failure
nodes = []
available_nodes = []
nodecount = 0
count = 0
for i in range(fail_num):
random.seed(random_seed)
nodes.append(random.sample(available_nodes_list, 3))
for update_time in update_times:
if int(update_time) in unavailable_times:
count += 1
available_nodes.append(nodes[nodecount])
if count % length == 0:
nodecount += 1
else:
available_nodes.append(available_nodes_list)
return available_nodes
# for network partition scenario to generate available nodes for each update
def partition_nodes_gen(random_seed, scenario_num, length, update_times):
random.seed(random_seed)
available_nodes_list = ['a', 'b', 'c', 'd', 'e', 'f']
unavailable_times = sample_scenario_period(update_times, scenario_num, length)
# 3 nodes to write and other 3 to read
read_nodes = []
write_nodes = []
for i in range(scenario_num):
random.seed(random_seed)
nodes_to_read = random.sample(available_nodes_list, 3)
read_nodes.append(nodes_to_read)
write_node = []
for node in available_nodes_list:
if node not in nodes_to_read:
write_node.append(node)
write_nodes.append(write_node)
available_nodes = []
rnodecount = 0
wnodecount = 0
rcount = 0
wcount = 0
i = 0
for update_time in update_times:
available_node = []
if i % 2 == 0:
if int(update_time) in unavailable_times:
rcount += 1
available_nodes.append(read_nodes[rnodecount])
if rcount % length == 0:
rnodecount += 1
else:
for node in available_nodes_list:
available_node.append(node)
available_nodes.append(available_node)
else:
if int(update_time) in unavailable_times:
wcount += 1
available_nodes.append(write_nodes[wnodecount])
if wcount % length == 0:
wnodecount += 1
else:
for node in available_nodes_list:
available_node.append(node)
available_nodes.append(available_node)
i += 1
return available_nodes
# for network failure & node partition scenario to generate current_times and test_times
def failure_partition_test_set_gen(splitmode, test_set, test_size, random_seed, scenario_num, length):
random.seed(random_seed)
current_times = []
test_times = []
# generate current_times and sample test_times
for i in range(test_set.shape[0] - 1):
current_times.append([test_set.iloc[i, 0], test_set.iloc[i + 1, 0]])
for current_time in current_times:
# print(int(current_time[0]), int(current_time[1]))
test_time = random.sample(range(int(current_time[0]), int(current_time[1])), 1)[0]
test_times.append(test_time)
# merge current_times and test_times to apply scenarios
current_times = | pd.DataFrame(current_times[0:test_size]) | pandas.DataFrame |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
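    """Round-trip Series and DataFrames through to_hdf/read_hdf."""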
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", append=True, format="fixed")
msg = r"invalid HDFStore format specified \[foo\]"
with pytest.raises(TypeError, match=msg):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError, match=msg):
df.to_hdf(path, "df", append=False, format="foo")
# File path doesn't exist
path = ""
msg = f"File {path} does not exist"
with pytest.raises(FileNotFoundError, match=msg):
read_hdf(path, "df")
def test_get(setup_path):
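    """Test HDFStore.get with and without a leading slash, and for a missing key."""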
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
def test_put_integer(setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
_check_roundtrip(df, tm.assert_frame_equal, setup_path)
def test_table_values_dtypes_roundtrip(setup_path):
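    """Check that column dtypes survive a table round trip and that mismatches raise."""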
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
msg = re.escape(
"invalid combination of [values_axes] on appending data "
"[name->values_block_0,cname->values_block_0,"
"dtype->float64,kind->float,shape->(1, 3)] vs "
"current table [name->values_block_0,"
"cname->values_block_0,dtype->int64,kind->integer,"
"shape->None]"
)
with pytest.raises(ValueError, match=msg):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them thought)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = | Timestamp("20130102") | pandas._libs.tslibs.Timestamp |
import os
import param
import numpy as np
import pandas as pd
import concurrent.futures
from ulmo.usgs import nwis
from functools import partial
from quest import util
from quest.static import ServiceType, GeomType, DataType
from quest.plugins import ProviderBase, TimePeriodServiceBase, load_plugins
BASE_PATH = 'usgs-nwis'
class NwisServiceBase(TimePeriodServiceBase):
period = param.String(default='P365D', precedence=4, doc='time period (e.g. P365D = 365 days or P4W = 4 weeks)')
def download(self, catalog_id, file_path, dataset, **kwargs):
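        """Download one NWIS time series for a site, clean it up, and save it as timeseries HDF5."""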
p = param.ParamOverrides(self, kwargs)
parameter = p.parameter
start = p.start
end = p.end
period = p.period
if dataset is None:
dataset = 'station-' + catalog_id
if start and end:
period = None
pmap = self.parameter_map(invert=True)
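        # Inverted map values look like '00060' or '00060:00003' (parameter code,
        # optionally followed by a statistic code); padding with None lets a missing
        # statistic code unpack cleanly below.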
parameter_code, statistic_code = (pmap[parameter].split(':') + [None])[:2]
data = nwis.get_site_data(catalog_id,
parameter_code=parameter_code,
statistic_code=statistic_code,
start=start, end=end, period=period,
service=self.service_name)
# dict contains only one key since only one parameter/statistic was
# downloaded, this would need to be changed if multiple
# parameter/stat were downloaded together
if not data:
raise ValueError('No Data Available')
data = list(data.values())[0]
# convert to dataframe and cleanup bad data
df = pd.DataFrame(data['values'])
if df.empty:
raise ValueError('No Data Available')
df = df.set_index('datetime')
df.value = df.value.astype(float)
if statistic_code in ['00001', '00002', '00003']:
df.index = pd.to_datetime(df.index).to_period('D')
else:
df.index = pd.to_datetime(df.index) # this is in UTC
        df[df.values == -999999] = np.nan  # NWIS flags missing values as -999999
df.rename(columns={'value': parameter}, inplace=True)
file_path = os.path.join(file_path, BASE_PATH, self.service_name, dataset, '{0}.h5'.format(dataset))
del data['values']
metadata = {
'name': dataset,
'metadata': data,
'file_path': file_path,
'file_format': 'timeseries-hdf5',
'datatype': DataType.TIMESERIES,
'parameter': parameter,
'unit': data['variable']['units']['code'],
'service_id': 'svc://usgs-nwis:{}/{}'.format(self.service_name, catalog_id)
}
# save data to disk
io = load_plugins('io', 'timeseries-hdf5')['timeseries-hdf5']
io.write(file_path, df, metadata)
del metadata['service_id']
return metadata
def search_catalog(self, **kwargs):
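        """Build a catalog of NWIS sites by querying every state in parallel."""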
func = partial(_nwis_catalog_entries, service=self.service_name)
with concurrent.futures.ProcessPoolExecutor() as executor:
sites = executor.map(func, _states())
sites = {k: v for d in sites for k, v in d.items()}
df = pd.DataFrame.from_dict(sites, orient='index')
for col in ['latitude', 'longitude']:
df[col] = df['location'].apply(lambda x: float(x[col]))
df.rename(columns={
'code': 'service_id',
'name': 'display_name',
}, inplace=True)
return df
def get_parameters(self, catalog_ids=None):
df = self.search_catalog()
chunks = list(_chunks(df.index.tolist()))
func = partial(_site_info, service=self.service_name)
with concurrent.futures.ProcessPoolExecutor(max_workers=None) as executor:
data = executor.map(func, chunks)
data = pd.concat(data, ignore_index=True)
data['parameter_code'] = data['parm_cd']
idx = pd.notnull(data['stat_cd'])
data.loc[idx, 'parameter_code'] += ':' + data['stat_cd']
data['external_vocabulary'] = 'USGS-NWIS'
data.rename(columns={
'site_no': 'service_id',
'count_nu': 'count'
},
inplace=True)
data = data[pd.notnull(data['parameter_code'])]
data['parameter'] = data['parameter_code'].apply(
lambda x: self._parameter_map.get(x)
)
pm_codes = _pm_codes()
data['description'] = data['parm_cd'].apply(
lambda x: pm_codes.loc[x]['SRSName'] if x in pm_codes.index else ''
)
data['unit'] = data['parm_cd'].apply(
lambda x: pm_codes.loc[x]['parm_unit'] if x in pm_codes.index else ''
)
cols = ['parameter', 'parameter_code', 'external_vocabulary',
'service_id', 'description', 'begin_date',
'end_date', 'count',
]
data = data[cols]
# datasets need to have required quest metadata and external metadata
# need to keep track of units/data classification/restrictions
return data
class NwisServiceIV(NwisServiceBase):
service_name = 'iv'
display_name = 'NWIS Instantaneous Values Service'
description = 'Retrieve current streamflow and other real-time data for USGS water sites since October 1, 2007'
service_type = ServiceType.GEO_DISCRETE
unmapped_parameters_available = True
geom_type = GeomType.POINT
datatype = DataType.TIMESERIES
geographical_areas = ['Alaska', 'USA', 'Hawaii']
bounding_boxes = [
(-178.19453125, 51.6036621094, -130.0140625, 71.4076660156),
(-124.709960938, 24.5423339844, -66.9870117187, 49.3696777344),
(-160.243457031, 18.9639160156, -154.804199219, 22.2231445312),
]
_parameter_map = {
'00060': 'streamflow',
'00065': 'gage_height',
'00010': 'water_temperature',
}
parameter = param.ObjectSelector(default=None, doc='parameter', precedence=1, objects=sorted(_parameter_map.values()))
class NwisServiceDV(NwisServiceBase):
service_name = 'dv'
display_name = 'NWIS Daily Values Service'
description = 'Retrieve historical summarized daily data about streams, lakes and wells. Daily data available ' \
'for USGS water sites include mean, median, maximum, minimum, and/or other derived values.'
service_type = ServiceType.GEO_DISCRETE
unmapped_parameters_available = True
geom_type = GeomType.POINT
datatype = DataType.TIMESERIES
geographical_areas = ['Alaska', 'USA', 'Hawaii']
bounding_boxes = [
(-178.19453125, 51.6036621094, -130.0140625, 71.4076660156),
(-124.709960938, 24.5423339844, -66.9870117187, 49.3696777344),
(-160.243457031, 18.9639160156, -154.804199219, 22.2231445312),
]
_parameter_map = {
'00060:00003': 'streamflow:mean:daily',
'00010:00001': 'water_temperature:daily:min',
'00010:00002': 'water_temperature:daily:max',
'00010:00003': 'water_temperature:daily:mean',
}
parameter = param.ObjectSelector(default=None, doc='parameter', precedence=1, objects=sorted(_parameter_map.values()))
class NwisProvider(ProviderBase):
service_list = [NwisServiceIV, NwisServiceDV]
    display_name = 'USGS NWIS Web Services'
description = 'Services available through the USGS National Water Information System'
organization_name = 'United States Geological Survey'
organization_abbr = 'USGS'
name = 'usgs-nwis'
def _chunks(l, n=100):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i+n]
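# Example (illustrative) of the chunking above, used to batch site ids for NWIS requests:
#     list(_chunks(['a', 'b', 'c', 'd', 'e'], n=2))  # -> [['a', 'b'], ['c', 'd'], ['e']]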
def _nwis_catalog_entries(state, service):
return nwis.get_sites(state_code=state, service=service)
def _nwis_parameters(site, service):
return {site: list(nwis.get_site_data(site, service=service).keys())}
def _states():
return [
"AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
"HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"
]
def _parse_rdb(url, index=None):
    df = pd.read_csv(url, sep='\t', comment='#')
from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_extension_array_dtype,
is_list_like,
is_scalar,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexers import maybe_convert_indices
from pandas.io.formats.printing import pprint_thing
from .blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from .concat import ( # all for concatenate_block_managers
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks) # type: Tuple[Block, ...]
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
"Number of Block dimensions ({block}) must equal "
"number of axes ({self})".format(block=block.ndim, self=self.ndim)
)
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
"Length mismatch: Expected axis has {old} elements, new "
"values have {new} elements".format(old=old_len, new=new_len)
)
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
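    # Worked example (block layout assumed): for a manager whose axes[0] holds 4 columns split
    # across two blocks -- block 0 managing columns [0, 2] and block 1 managing columns [1, 3] --
    # the rebuilt arrays are:
    #     _blknos  == [0, 1, 0, 1]   # which block holds each manager column
    #     _blklocs == [0, 0, 1, 1]   # position of that column inside its block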
@property
def items(self):
return self.axes[0]
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
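    # Example (illustrative): on a frame with two float64 columns and one int64 column,
    # get_dtype_counts() returns {'float64': 2, 'int64': 1} -- each block contributes
    # block.shape[0], i.e. the number of manager columns it holds.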
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
return make_block(values, placement=mgr_locs)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [
self.axes[0].get_indexer(blk_items) for blk_items in bitems
]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs)
)
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
else:
output += "\nAxis {i}: {ax}".format(i=i, ax=ax)
for block in self.blocks:
output += "\n{block}".format(block=pprint_thing(block))
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
"block items\n# manager items: {0}, # "
"tot_items: {1}".format(len(self.items), tot_items)
)
def apply(
self,
f,
axes=None,
filter=None,
do_integrity_check=False,
consolidate=True,
**kwargs,
):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs["filter"] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == "where":
align_copy = True
if kwargs.get("align", True):
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
elif f == "putmask":
align_copy = False
if kwargs.get("align", True):
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
elif f == "fillna":
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ["value"]
else:
align_keys = []
# TODO(EA): may interfere with ExtensionBlock.setitem for blocks
# with a .values attribute.
aligned_args = {
k: kwargs[k]
for k in align_keys
if not isinstance(kwargs[k], ABCExtensionArray)
and hasattr(kwargs[k], "values")
}
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = obj._info_axis_number
kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(
result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
)
bm._consolidate_inplace()
return bm
def quantile(
self,
axis=0,
consolidate=True,
transposed=False,
interpolation="linear",
qs=None,
numeric_only=None,
):
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object)
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
)
def isna(self, func, **kwargs):
return self.apply("apply", func=func, **kwargs)
def where(self, **kwargs):
return self.apply("where", **kwargs)
def setitem(self, **kwargs):
return self.apply("setitem", **kwargs)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
def diff(self, **kwargs):
return self.apply("diff", **kwargs)
def interpolate(self, **kwargs):
return self.apply("interpolate", **kwargs)
def shift(self, **kwargs):
return self.apply("shift", **kwargs)
def fillna(self, **kwargs):
return self.apply("fillna", **kwargs)
def downcast(self, **kwargs):
return self.apply("downcast", **kwargs)
def astype(self, dtype, **kwargs):
return self.apply("astype", dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply("convert", **kwargs)
def replace(self, value, **kwargs):
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
# figure out our mask a-priori to avoid repeated replacements
values = self.as_array()
def comp(s, regex=False):
"""
            Generate a bool array by performing an equality check, or by performing
            an element-wise regular expression match
"""
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
            # it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
new_rb = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any():
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
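    # Illustrative sketch (values assumed): mgr.replace_list(src_list=[1, 2], dest_list=[0, 0])
    # builds one boolean mask per source value from the dense array up front, then walks the
    # blocks once; because the masks predate any replacement, a 1 already rewritten to 0 cannot
    # be re-matched by a later (source, dest) pair.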
def is_consolidated(self):
"""
        Return True if no two blocks share the same ftype (i.e. the blocks are fully consolidated)
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any(block.is_datelike for block in self.blocks)
@property
def any_extension_types(self):
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
        inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
'''
Run using python from terminal.
Doesn't read from scripts directory (L13) when run from poetry shell.
'''
import pandas as pd
import pandas.testing as pd_testing
import typing as tp
import os
import unittest
from unittest import mock
import datetime
from scripts import influx_metrics_univ3 as imetrics
class TestInfluxMetrics(unittest.TestCase):
def get_price_cumulatives_df(self, path) -> pd.DataFrame:
'''
Helper to return dataframe used to mock out `query_data_frame` in the
`get_price_cumulatives` function in `scripts/influx_metrics_univ3.py`
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.join(base, 'helpers')
base = os.path.join(base, path)
base = os.path.join(base, 'get-price-cumulatives.csv')
df = pd.read_csv(base, sep=',')
df._start = pd.to_datetime(df._start)
df._stop = pd.to_datetime(df._stop)
df._time = pd.to_datetime(df._time)
return df
def get_find_start_df(self, path) -> pd.DataFrame:
'''
Helper to return dataframe used to mock out `query_data_frame` in the
`find_start` function in `scripts/influx_metrics_univ3.py`
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.join(base, 'helpers')
base = os.path.join(base, path)
base = os.path.join(base, 'find_start.csv')
df = pd.read_csv(base, sep=',', index_col=0)
df._start = pd.to_datetime(df._start)
df._stop = pd.to_datetime(df._stop)
df._time = pd.to_datetime(df._time)
return df
def get_list_of_timestamps_df(self, path) -> pd.DataFrame:
'''
Helper to return dataframe used to mock out `query_data_frame` in the
`list_of_timestamps` function in `scripts/influx_metrics_univ3.py`
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.join(base, 'helpers')
base = os.path.join(base, path)
base = os.path.join(base, 'list_of_timestamps.csv')
df = pd.read_csv(base, sep=',', index_col=0)
df._time = pd.to_datetime(df._time)
return df
def get_pc_dfs(self, df: pd.DataFrame) -> tp.List[pd.DataFrame]:
'''
Helper to format dataframe used to mock out `query_data_frame`
'''
df_filtered = df.filter(items=['_time', '_field', '_value'])
df_p0c = df_filtered[df_filtered['_field'] == 'tick_cumulative']
df_p0c = df_p0c.sort_values(by='_time', ignore_index=True)
df_p0c.loc[:, '_field'] = 'tick_cumulative0'
df_p1c = df_p0c.copy()
df_p1c.loc[:, '_field'] = 'tick_cumulative1'
df_p1c.loc[:, '_value'] = df_p0c.loc[:, '_value']
return [df_p0c, df_p1c]
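    # Sketch of the expected output (column names taken from the helper above): both returned
    # frames keep the _time / _field / _value columns and only differ in the _field label,
    # 'tick_cumulative0' for df_p0c and 'tick_cumulative1' for df_p1c, with identical _value data.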
@mock.patch('scripts.influx_metrics_univ3.InfluxDBClient')
def test_create_client(self, mock_idb_client):
'''
Assert that an `InfluxDBClient` is instantiated one time with config
dict containing the `url` and `token` key-value pairs
'''
config = {
'token': '<PASSWORD>',
'org': 'INFLUXDB_ORG',
'bucket': 'ovl_metrics_univ3',
'source': 'ovl_univ3_1h',
'url': 'INFLUXDB_URL',
}
self.assertEqual(mock_idb_client.call_count, 0)
imetrics.create_client(config)
self.assertEqual(mock_idb_client.call_count, 1)
def test_get_config(self):
"""
Assert `config` dict contains expected InfluxDB config parameter keys
"""
expected = {'token', 'org', 'bucket', 'source', 'url'}
actual = set(imetrics.get_config().keys())
self.assertEqual(expected, actual)
def test_get_params(self):
"""
Assert `params` dict contains expected keys used in statistical
estimates
"""
expected_keys = {"points", "window", "period",
"tolerance", "alpha", "n", "data_start"}
actual = imetrics.get_params()
actual_keys = set(actual.keys())
print(actual)
print(type(actual['alpha']))
self.assertEqual(expected_keys, actual_keys)
self.assertIsInstance(actual['points'], int)
self.assertIsInstance(actual['window'], int)
self.assertIsInstance(actual['period'], int)
self.assertIsInstance(actual['tolerance'], int)
self.assertIsInstance(actual['alpha'], tp.List)
self.assertIsInstance(actual['n'], tp.List)
self.assertIsInstance(actual['data_start'], int)
for i in actual['alpha']:
self.assertIsInstance(i, float)
for i in actual['n']:
self.assertIsInstance(i, int)
def test_get_quotes_path(self):
'''
Assert quote path is correct
'''
base = os.path.dirname(os.path.abspath(__file__))
base = os.path.abspath(os.path.join(base, os.pardir))
base = os.path.abspath(os.path.join(base, os.pardir))
qp = 'scripts/constants/univ3_quotes.json'
expected = os.path.join(base, qp)
actual = imetrics.get_quote_path()
self.assertEqual(expected, actual)
def test_get_quotes(self):
expected_keys = {'id', 'block_deployed', 'time_deployed',
'pair', 'token0', 'token1', 'token0_name',
'token1_name', 'is_price0', 'amount_in'}
actual = imetrics.get_quotes()
self.assertIsInstance(actual, tp.List)
for i in actual:
actual_keys = set(i.keys())
self.assertEqual(expected_keys, actual_keys)
self.assertIsInstance(i['is_price0'], bool)
self.assertIsInstance(i['id'], str)
self.assertIsInstance(i['pair'], str)
self.assertIsInstance(i['token0'], str)
self.assertIsInstance(i['token1'], str)
self.assertIsInstance(i['amount_in'], float)
def test_get_price_fields(self):
'''
Assert price field is 'tick_cumulative'
'''
expected = 'tick_cumulative'
actual = imetrics.get_price_fields()
self.assertEqual(expected, actual)
# TODO: run for all quotes in `quotes.json`
@mock.patch('influxdb_client.client.query_api.QueryApi.query_data_frame')
def test_get_price_cumulatives(self, mock_df):
path = 'influx-metrics/uniswap_v3/'
start_time = 1636526054 # any `int` values will do for the mock
end_time = 1636528398
query_df = self.get_price_cumulatives_df(path)
expected_pcs = self.get_pc_dfs(query_df)
mock_df.return_value = query_df
config = {
'token': 'INFLUXDB_TOKEN',
'org': 'INFLUXDB_ORG',
'bucket': 'ovl_metrics_univ3',
'source': 'ovl_univ3_1h',
'url': 'INFLUXDB_URL',
}
client = imetrics.create_client(config)
query_api = client.query_api()
params = imetrics.get_params()
quotes = imetrics.get_quotes()
quote = quotes[0]
_, actual_pcs = imetrics.get_price_cumulatives(
query_api, config,
quote, params,
datetime.datetime.utcfromtimestamp(start_time),
datetime.datetime.utcfromtimestamp(end_time)
)
        pd_testing.assert_frame_equal(expected_pcs[0], actual_pcs[0])
# Preprocess the data for the second (final) round of the competition
import os
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
import numpy as np
from sklearn.metrics import f1_score
path = './'
w2v_path = path + '/w2v'
train = pd.read_csv(path + '/train_2.csv')
test = pd.read_csv(path + '/test_2.csv')
train_stacking = pd.read_csv(path + '/stack/train.csv')
test_stacking = pd.read_csv(path + '/stack/test.csv')
print(len(train), len(test))
train = train.merge(train_stacking, 'left', 'user_id')
test = test.merge(test_stacking, 'left', 'user_id')
print(len(train), len(test))
train_first = pd.read_csv(path + '/train_all.csv')
train['data_type'] = 0
test['data_type'] = 0
train_first['data_type'] = 1
data = pd.concat([train, test], ignore_index=True).fillna(0)
train_first = train_first.fillna(0)
data['label'] = data.current_service.astype(int)
data = data.replace('\\N', 0)
train_first = train_first.replace('\\N', 0)
data['gender'] = data.gender.astype(int)
train_first['gender'] = train_first.gender.astype(int)
data.loc[data['service_type'] == 3, 'service_type'] = 4
origin_cate_feature = ['service_type', 'complaint_level', 'contract_type', 'gender', 'is_mix_service',
'is_promise_low_consume',
'many_over_bill', 'net_service']
origin_num_feature = ['1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee',
'age', 'contract_time',
'former_complaint_fee', 'former_complaint_num',
'last_month_traffic', 'local_caller_time', 'local_trafffic_month', 'month_traffic',
'online_time', 'pay_num', 'pay_times', 'service1_caller_time', 'service2_caller_time']
for i in origin_num_feature:
data[i] = data[i].astype(float)
train_first[i] = train_first[i].astype(float)
w2v_features = []
for col in ['1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee']:
    df = pd.read_csv(w2v_path + '/' + col + '.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 07:16:35 2018
@author: MiguelArturo
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from math import log, sqrt
import numpy as np
import pandas as pd
from bokeh.plotting import figure
#Import modules for interactive graphics
from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Select
from bokeh.io import curdoc
#Import modules for conversions to radians.
import math
#Import modules for time management and time zones
import time, datetime
import pytz
from pytz import timezone
#Color palette
import seaborn as sns
def make_plot(source):
"""
Plot the annular wedges
Parameters
----------
source : ColumnDataSources
Returns
-------
return : Figure
"""
hover = HoverTool(
names=["anular_wedges"],
tooltips=[
("Activity", "@Name"),
("color", "@color"),
("Time Zone","@Time_Zone"),
])
plot = figure(width=700, height=700,tools=[hover], title="",x_axis_type=None, y_axis_type=None, x_range=(-420, 420), y_range=(-420, 420),
min_border=0, outline_line_color="white", background_fill_color="#ffffff",)
plot.annular_wedge(x=0, y=0, inner_radius='inner_radius', outer_radius='outer_radius',start_angle='start_angle', end_angle='end_angle',
color='color', alpha=0.9, hover_color='color',hover_line_color="black", hover_alpha = 0.5, source=source,name="anular_wedges",legend='Name')
#Fixed attributes
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
#plot clock
angles = 2*np.pi/24*pd.Series(list(range(0,24)))
plot.annular_wedge(0, 0, fr_inner_radius, tr_outer_radius, angles, angles, color="lightgrey")
# Plot clock labels (24 hours)
labels = np.power(10.0, np.arange(-3, 4))
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = ((tr_outer_radius + 10) - fr_inner_radius) / (minr - maxr)
b = fr_inner_radius - a * maxr
radii = a * np.sqrt(np.log(labels * 1E4)) + b
xr = radii[0]*np.cos(np.array(angles))
yr = radii[0]*np.sin(np.array(angles))
label_angle=np.array(angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
labels_24h_clock = list(range(6,-1,-1)) + list(range(23,6,-1))
    plot.text(xr, yr, pd.Series(labels_24h_clock))
from os.path import join
import threading
from pandas import DataFrame
try:
from main import main
from data_access import GetData
from utils import get_folder_path, write_yaml, read_yaml
from configs import conf
from scheduler_service import create_job
except Exception as e:
from .main import main
from .data_access import GetData
from .utils import get_folder_path, write_yaml, read_yaml
from .configs import conf
from .scheduler_service import create_job
class ABTest:
"""
    data: You may pass data directly to the testing process. In that case you don't need to assign
          data_source / data_query_path, but the assigned data must be a pandas DataFrame.
    test_groups: column of the data which represents the A/B test groups.
                 It is a column name from the data.
                 The A/B test runs as control vs. active group, according to the unique values of this column.
                 This column must have exactly 2 unique values, which define the test groups.
    groups: column of the data which represents individual groups for testing.
            A/B testing will be applied separately to each unique value of the groups column in the data.
    feature: Represents the tested values of the test.
             Test calculations will be applied according to the feature column.
    data_source: AWS Redshift, BigQuery, PostgreSQL, csv and json files can be connected to the system.
        E.g.
            {"data_source": ..., "db": ..., "password": ..., "port": ..., "server": ..., "user": ...}
    data_query_path: if a file is used for data importing,
                        it must be the path (e.g. /../.../ab_test_raw_data.csv);
                     if there is a db connection such as PostgreSQL / BigQuery,
                        the query must be in the format "SELECT++++*+++FROM++ab_test_table_+++"
    time_indicator: This can only be applied with a date. It can be hour, day, week, week_part, quarter, year, month.
                    The time indicator checks whether each individual date part is a significantly
                    distinct group for the data set or not.
                    If it is, time_indicator is used as a group.
    exporting_data: If you don't need to export data, assign False. By default it is True.
    export_path: Path where results are exported in csv format (optional).
                 The path alone is enough for importing data with .csv format.
                 Output will be '<date>_results.csv' with the test execution date, e.g. 20201205.results.csv
    time_schedule: When the A/B test needs to be scheduled, assign here 'Hourly', 'Monthly',
                   'Weekly', 'Mondays', ... , 'Sundays'.
    time_period: The additional time period (optional: year, month, day, hour, week,
                 week day, day part, quarter) (check the time period details).
                 This parameter must be assigned when the A/B test is scheduled.
"""
def __init__(self,
test_groups,
data=None,
groups=None,
feature=None,
data_source=None,
data_query_path=None,
time_period=None,
time_indicator=None,
time_schedule=None,
exporting_data=True,
export_path=None,
connector=None,
confidence_level=None,
boostrap_sample_ratio=None,
boostrap_iteration=None):
self.test_groups = test_groups
self.data = data
self.groups = groups
self.feature = feature
self.data_source = data_source
self.data_query_path = data_query_path
self.time_period = time_period
self.time_indicator = time_indicator
self.time_schedule = time_schedule
self.exporting_data = False if export_path is None else exporting_data
self.export_path = export_path
self.connector = connector
self.confidence_level = confidence_level
self.boostrap_sample_ratio = boostrap_sample_ratio
self.boostrap_iteration = boostrap_iteration
self.arguments = {"data": data,
"test_groups": test_groups,
"groups": groups,
"feature": feature,
"data_source": data_source,
"data_query_path": data_query_path,
"time_period": time_period,
"time_indicator": time_indicator,
"export_path": export_path,
"exporting_data": exporting_data,
"parameters": None}
self.arg_terminal = {"test_groups": "TG",
"groups": "G",
"date": "D",
"feature": "F",
"data_source": "DS",
"data_query_path": "DQP",
"time_period": "TP",
"time_indicator": "TI", "export_path": "EP", "parameters": "P"}
self.args_str = ""
self.ab_test = None
self.path = get_folder_path()
self.mandetory_arguments = ["data_source", "data_query_path", "test_groups", "groups", "feature", "export_path"]
self.schedule_arg = "TS"
self.params = None
def get_connector(self):
"""
        Checks the data source connection.
        Tries db connections (PostgreSQL, RedShift, Google BigQuery).
        If the source is a csv / json file, it checks that the file can be read instead.
"""
if self.data is None:
config = conf('config')
try:
data_access_args = {"data_source": self.data_source,
"data_query_path": self.data_query_path,
"time_indicator": self.time_indicator,
"feature": self.feature}
for i in config['db_connection']:
if i != 'data_source':
config['db_connection'][i] = None
if self.data_source not in ["csv", "json"]:
config['db_connection'][i] = self.connector[i]
else:
config['db_connection']['data_source'] = self.data_source
if self.data_source in ["csv", "json"]:
data_access_args['test'] = 10
write_yaml(join(self.path, "docs"), "configs.yaml", config, ignoring_aliases=False)
source = GetData(**data_access_args)
source.get_connection()
if self.data_source in ["csv", "json"]:
source.data_execute()
return True if len(source.data) != 0 else False
else: return True
except Exception as e:
return False
else:
return True if type(self.data) == DataFrame else False
def query_string_change(self):
if self.data_source in ['mysql', 'postgresql', 'awsredshift', 'googlebigquery']:
self.data_query_path = self.data_query_path.replace("\r", " ").replace("\n", " ").replace(" ", "+") + "+"
def check_for_time_period(self):
if self.time_period is None:
return True
else:
if self.time_period in ["day", "year", "month", "week", "week_day",
"hour", "quarter", "week_part", "day_part"]:
return True
else:
return False
def check_for_time_schedule(self):
if self.time_schedule is None:
return True
else:
if self.time_schedule in ["Mondays", "Tuesdays", "Wednesdays", "Thursdays", "Fridays",
"Saturdays", "Sundays", "Daily", "hour", "week"]:
return True
else:
return False
def assign_test_parameters(self, param, param_name):
if param is not None:
for i in self.params:
if type(param) == list:
if len([i for i in param if 0 < i < 1]) != 0:
self.params[i][param_name] = "_".join([str(i) for i in param if 0 < i < 1])
else:
if 0 < param < 1:
self.params[i][param_name] = str(param)
def check_for_test_parameters(self):
"""
checking and updating test parameters; confidence_level and boostrap_ratio
Boostrap Ratio: decision of the ratio of boostraping.
sample_size = data_size * boostrap_Ratio
Confidence Level: The decision of Hypothesis Test of Confidences Level.
This allows us to run or A/B Test with more than one Confidence Level to see
how the Hypothesis of Acceptance is changing.
"""
if self.confidence_level is not None or self.boostrap_sample_ratio is not None:
self.params = read_yaml(join(self.path, "docs"), "test_parameters.yaml")['test_parameters']
for _p in [(self.confidence_level, "confidence_level"),
(self.boostrap_sample_ratio, "sample_size"),
(self.boostrap_iteration, "iteration")]:
self.assign_test_parameters(param=_p[0], param_name=_p[1])
self.arguments["parameters"] = self.params
def check_for_mandetory_arguments(self):
accept = True
if self.data is None:
for arg in self.arg_terminal:
if arg in self.mandetory_arguments:
if self.arguments[arg] is None:
accept = False
return accept
def ab_test_init(self):
"""
        Initialize the A/B test. After assigning the parameters, don't forget to run ab_test_init.
Example;
groups = "groups"
test_groups = "test_groups"
feature = "feature"
data_source = "postgresql"
connector = {"user": ***, "password": ***, "server": "127.0.0.1",
"port": ****, "db": ***}
        query = '
SELECT
groups,
test_groups
feature,
time_indicator
FROM table
'
confidence_level = [0.01, 0.05]
boostrap_ratio = [0.1, 0.2]
export_path = abspath("") + '/data'
ab = ABTest(test_groups=test_groups,
groups=groups,
feature=feature,
data_source=data_source,
data_query_path=query,
time_period=time_period,
time_indicator=time_indicator,
time_schedule=time_schedule,
export_path=export_path,
connector=connector,
confidence_level=confidence_level,
boostrap_sample_ratio=boostrap_ratio)
ab.ab_test_init()
"""
self.check_for_test_parameters() # checking and updating test parameters; confidence_level and boostrap_ratio
self.query_string_change()
if self.get_connector(): # connection to data source check
if self.check_for_time_period():
if self.check_for_mandetory_arguments():
self.ab_test = main(**self.arguments)
else:
print("check for the required paramters to initialize A/B Test:")
print(" - ".join(self.mandetory_arguments))
else:
print("optional time periods are :")
print("year", "month", "week", "week_day", "hour", "quarter", "week_part", "day_part")
else:
print("pls check for data source connection / path / query.")
def get_results(self):
if self.ab_test is not None:
return self.ab_test.final_results
else:
            return DataFrame()
# Import pyVPLM packages
from pyvplm.core.definition import PositiveParameter, PositiveParameterSet
from pyvplm.addon import variablepowerlaw as vpl
from pyvplm.addon import pixdoe as doe
from pint import UnitRegistry
import save_load as sl
import pi_format as pif
import csv_export as csv
import constraint_format as csf
import round_minmax as rmm
import constant_pi as cpi
import number_of_coeff as noc
import dependency_plot as dpp
import save_plots as spl
import save_py_func as spf
# Import external libs
import copy
import os
import pandas as pd
from pandas.plotting import scatter_matrix
import plotly.graph_objects as go
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import webbrowser
import ipyfilechooser as ipf
import time
from datetime import datetime
import ipywidgets as widgets
import ipyvuetify as v
from IPython.display import display, clear_output
import warnings
import seaborn as sns
from win32gui import GetWindowRect, GetForegroundWindow
# ------------Constants------------------------
from text_list import TEXT_LIST as TL
FORBIDDEN_CHARACTERS = [' ', '|', '*', '/', '-', '+', ',', "#", "!", "$", "£", "%", "^", "&", "?", ";", "ù", "é",
"@", "¤", "µ", "è", "°", "\\", '"', "'"]
FORBIDDEN_CHARACTERS_DESC = ['|', '"', "'", "#"]
FORBIDDEN_PARAMS = ['I', 'gamma', 'beta', 're', 'ln', 'log', 'sqrt', 'arg']
DOE_MULTIPLIER = 10
# ------------Global variables-----------------
WORKDIR = os.path.abspath(os.getcwd())
OUTPUTS = 0
PHYSICAL_PARAMS = None
OLD_PHYSICAL_PARAMS = None
CHOSEN_PI_SET = None
PI_SETS = [None, None, []]
CHOSEN_PI_LIST = []
PI_LISTS = [[], [], []]
DOE_PI_LIST = []
DOE = []
TOTAL_DOE = pd.DataFrame()
FIG = plt.Figure()
AX = FIG.add_subplot(111)
RESULT_DF = pd.DataFrame()
OLD_RESULT = pd.DataFrame()
OLD_PI_SET = []
RESULT_PI = np.array([])
DEPENDENCY_CHECK_STATE = []
OLD_DEPENDENCY_CHECK_STATE = []
REGRESSION_PI_LIST = []
MODELS = {}
REGRESSIONS = []
PI0_PI_LIST = []
"""
This is the code for GUI widgets and their associated functions. The first part contains all functions,
the second part (~line 2600) contains the widgets. These two parts are subdivided by tab name.
"""
# -----------Functions--------------------------------------------------------------------------------------------------
# Fist Physical Parameters Tab, some Buckingham tab and all Toolbar functions as well as some general helper functions
def check_name(name):
"""
Parameters
----------
name String in name TextField
Returns Boolean : True if the name is valid
-------
"""
if name == '':
name_entry.error_messages = TL[0]
return False
for for_char in FORBIDDEN_CHARACTERS:
if for_char in name:
name_entry.error_messages = f"{TL[1]}: {for_char}"
return False
for for_param in FORBIDDEN_PARAMS:
if name == for_param:
name_entry.error_messages = f"{TL[51]}: {for_param}"
return False
for item in sheet.items:
if item['name'] == name or item['name'].lower() == name:
name_entry.error_messages = TL[2]
return False
return True
def check_desc(desc):
"""
Parameters
----------
desc String in description TextField
Returns Boolean : True if the description is valid
-------
"""
for for_char in FORBIDDEN_CHARACTERS_DESC:
if for_char in desc:
desc_entry.error_messages = f"{TL[3]} : {for_char}"
return False
return True
def check_unit(unit):
"""
Parameters
----------
unit String in unit TextField
Returns Boolean : True if the unit is recognized by pint
-------
"""
if unit == '':
unit_entry.error_messages = TL[4]
return False
base_registry = UnitRegistry()
try:
if unit not in base_registry:
contains_upper = False
for u in unit:
if u.isupper():
contains_upper = True
break
if contains_upper:
unit_entry.error_messages = "Unit not recognized, try in lowercase"
else:
unit_entry.error_messages = TL[5]
return False
except Exception:
unit_entry.error_messages = "Invalid characters"
return False
return True
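# Example (illustrative) of the pint lookup used in check_unit: the unit string is looked up
# in the default registry, so defined unit names pass and unknown tokens are rejected:
#     ureg = UnitRegistry()
#     'meter' in ureg   # -> True
#     'foo' in ureg     # -> False (reported as an unrecognised unit)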
def check_bounds():
"""
Returns Boolean : True if the bounds in the lower bound and upper bound TextFields are valid
-------
"""
lb = lb_entry.v_model
ub = ub_entry.v_model
lbool = lb is None or lb == ""
ubool = ub is None or ub == ""
if ubool:
ub_entry.error_messages = TL[6]
return False
err_mess = TL[7]
if lbool:
try:
float(ub)
return True
except ValueError:
ub_entry.error_messages = err_mess
return False
else:
brk = False
try:
ub = float(ub)
except ValueError:
ub_entry.error_messages = err_mess
brk = True
try:
lb = float(lb)
except ValueError:
lb_entry.error_messages = err_mess
brk = True
if brk:
return False
if 0 < lb < ub:
return True
else:
neg = False
err_mess = TL[8]
if lb <= 0:
neg = True
lb_entry.error_messages = err_mess
if ub <= 0:
neg = True
ub_entry.error_messages = err_mess
if neg:
return False
else:
err_mess = TL[9]
lb_entry.error_messages = err_mess
ub_entry.error_messages = err_mess
return False
def add_item(widget, event, data):
"""
Returns Adds parameter specified by the user in the sheet DataTable, if one of the attributes is invalid, shows the
user an error under the TextField
-------
"""
name_entry.error_messages = ''
desc_entry.error_messages = ''
unit_entry.error_messages = ''
lb_entry.error_messages = ''
ub_entry.error_messages = ''
if check_name(name_entry.v_model) and check_desc(desc_entry.v_model) and check_unit(
unit_entry.v_model) and check_bounds():
name = name_entry.v_model
description = desc_entry.v_model
unit = unit_entry.v_model
lb = lb_entry.v_model
if lb:
lower_bound = float(lb_entry.v_model)
else:
lower_bound = None
name = name.upper()
upper_bound = float(ub_entry.v_model)
name_entry.v_model = ''
desc_entry.v_model = ''
unit_entry.v_model = ''
lb_entry.v_model = None
ub_entry.v_model = None
sheet.items = sheet.items + [{"name": name,
"description": description,
"unit": unit,
"lower bound": lower_bound,
"upper bound": upper_bound,
"in/out": "Input"}]
def order_items():
"""
Leaves output physical parameters at the end of the set (least priority to be repetitive)
Returns ordered physical parameters
-------
"""
data = sheet.items
inputs = []
outputs = []
for item in data:
if item["in/out"] == TL[10]:
outputs.append(item)
else:
inputs.append(item)
return inputs + outputs
def gen_parameter_set():
"""
Returns Generates a PositiveParameterSet from the physical parameters in the sheet DataTable, if there are none,
returns None
-------
"""
data = order_items()
if len(data) > 0:
first = True
param_set = {}
for item in data:
if item['lower bound'] is None or item['lower bound'] == "":
bounds = [item['upper bound']]
item['name'] = item['name'].upper()
else:
bounds = [item['lower bound'], item['upper bound']]
param = PositiveParameter(item['name'], bounds, item['unit'], item['description'])
param_set[item['name']] = param
if first:
param_set = PositiveParameterSet(param)
first = False
return param_set
return None
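# Illustrative sketch (one assumed sheet row): an item such as
#     {'name': 'F', 'description': 'load', 'unit': 'newton',
#      'lower bound': 1.0, 'upper bound': 100.0, 'in/out': 'Input'}
# becomes PositiveParameter('F', [1.0, 100.0], 'newton', 'load') and is stored in the returned
# PositiveParameterSet under the key 'F'; order_items() keeps output parameters at the end.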
def get_outputs():
"""
Returns int : The number of output parameters specified
-------
"""
global OUTPUTS
n = 0
for item in sheet.items:
if item['in/out'] == TL[10]:
n += 1
OUTPUTS = n
def buckingham():
"""
Returns Shows the set in buck_area and modifies current_set
-------
"""
global PHYSICAL_PARAMS, PI_LISTS, PI_SETS
if PHYSICAL_PARAMS is not None:
# noinspection PyTypeChecker
PI_SETS[0], PI_LISTS[0] = vpl.buckingham_theorem(PHYSICAL_PARAMS, True)
pi_set_str = str(PI_SETS[0])
formatted_pi_set = pif.format_pi_set(pi_set_str)
buck_area.v_model = formatted_pi_set
if force_area.v_model is None or force_area.v_model == "":
force_area.v_model = formatted_pi_set
if check1.v_model:
global CHOSEN_PI_SET, CHOSEN_PI_LIST
CHOSEN_PI_SET = PI_SETS[0]
CHOSEN_PI_LIST = PI_LISTS[0]
update_current_set()
if PI_LISTS[0]:
return True
return False
def force_buckingham(widget, event, data):
"""
Parameters
----------
widget force_buck_btn : button to check pi set
Returns Enables selection of the specified pi set if it is valid
-------
"""
widget.disabled = True
widget.loading = True
if force_buck_btn.children == [TL[11]]:
param_set = gen_parameter_set()
global OUTPUTS
out_n = OUTPUTS
try:
global PI_LISTS
PI_LISTS[1] = pif.format_force_area(force_area.v_model)
global PI_SETS
PI_SETS[1] = vpl.force_buckingham(param_set, *PI_LISTS[1])
if pif.check_outputs(PI_LISTS[1], param_set, out_n):
raise ValueError(TL[12])
force_area.error_messages = ""
force_area.success_messages = TL[13]
check2.disabled = False
force_area.readonly = True
force_area.clearable = False
if ' | ' in force_area.v_model:
force_area.v_model = force_area.v_model.replace(' | ', '\n')
force_area.background_color = "grey lighten-3"
force_eq.disabled = True
force_eq.v_model = ""
force_eq.background_color = "grey lighten-3"
add_pi_btn.disabled = True
force_copy_btn.disabled = True
force_buck_btn.children = [TL[14]]
except Exception as e:
force_area.success_messages = ""
force_area.error_messages = TL[15] + str(e)
else:
force_area.success_messages = ""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if check2.v_model:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
check2.disabled = True
check2.v_model = False
force_area.readonly = False
force_area.clearable = True
force_area.background_color = "white"
force_eq.disabled = False
force_eq.background_color = "white"
add_pi_btn.disabled = False
if auto_buck_table.v_model:
force_copy_btn.disabled = False
force_area.messages = ""
force_buck_btn.children = [TL[11]]
widget.loading = False
widget.disabled = False
def automatic_buckingham(widget, event, data):
"""
Parameters
----------
widget auto_buck_btn : button to perform automatic Buckingham analysis
Returns Fills auto_buck_table with the resulting pi sets
-------
"""
widget.disabled = True
widget.loading = True
param_set = gen_parameter_set()
combinator_pi_set, alternative_set_dict = vpl.automatic_buckingham(param_set, True)
global PI_SETS, PI_LISTS, PHYSICAL_PARAMS, OUTPUTS
for n in combinator_pi_set:
PI_SETS[2].append(combinator_pi_set[n][0])
PI_LISTS[2].append(list(combinator_pi_set[n][1]))
items = []
i = 0
j = 1
del_index = []
for exp in alternative_set_dict:
if not pif.check_outputs(PI_LISTS[2][i], PHYSICAL_PARAMS, OUTPUTS):
items.append({"pi set number": j, "expressions": exp})
j += 1
else:
del_index.append(i)
i += 1
del_index.reverse()
for i in del_index:
PI_SETS[2].pop(i)
PI_LISTS[2].pop(i)
auto_buck_table.items = items
if force_buck_btn.children == [TL[11]]:
force_copy_btn.disabled = False
check3.disabled = False
widget.loading = False
widget.disabled = False
def force_copy(widget, event, data):
"""
Returns Copies the selected pi set from auto_buck_table or buck area to force_area
-------
"""
l = len(auto_buck_table.items)
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number']:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
for i in range(0, l):
if auto_buck_table.items[i]['pi set number'] == pi_set_nb:
force_area.v_model = pif.format_auto_pi_set(auto_buck_table.v_model[0]['expressions'])
break
elif check1.v_model:
force_area.v_model = buck_area.v_model
def check1_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the pi set in buck_area
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check2.v_model = False
check3.v_model = False
CHOSEN_PI_SET = PI_SETS[0]
CHOSEN_PI_LIST = PI_LISTS[0]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def check2_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the pi set in force_area
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check1.v_model = False
check3.v_model = False
CHOSEN_PI_SET = PI_SETS[1]
CHOSEN_PI_LIST = PI_LISTS[1]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def check3_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the selected pi set in auto_buck_table
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check1.v_model = False
check2.v_model = False
l = len(auto_buck_table.items)
if auto_buck_table.v_model:
if auto_buck_table.v_model[0]['pi set number'] is None:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
else:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
CHOSEN_PI_SET = PI_SETS[2][pi_set_nb - 1]
CHOSEN_PI_LIST = PI_LISTS[2][pi_set_nb - 1]
for i in range(0, l):
if auto_buck_table.items[i]['pi set number'] == pi_set_nb:
update_current_set()
break
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def select_auto_pi_set(widget, event, data):
"""
Parameters
----------
data dict: Contains the pi set number of the selected pi set in the automatic buckingham data table
Returns Modifies current set accordingly
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if check3.v_model:
if data['value']:
pi_set_nb = data['item']['pi set number']
CHOSEN_PI_SET = PI_SETS[2][pi_set_nb - 1]
CHOSEN_PI_LIST = PI_LISTS[2][pi_set_nb - 1]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def pi_set_html(pi_set, math=True):
"""
Parameters
----------
pi_set: Pi set in a string form (with " | " separators between pi numbers)
math: display expression as Latex math (default True)
Returns A list of v.HTML widgets that are to be used as children of a v.CardText
-------
"""
if not math:
pi_set = pi_set.replace("**", "°°")
pi_set = pi_set.replace("*", " * ")
pi_set = pi_set.replace("°°", "**")
spt_pi_set = pi_set.split("| ")
card_text_children = []
for pi in spt_pi_set:
card_text_children.append(v.Html(tag='div', children=[pi]))
return card_text_children
else:
pi_set = pi_set.replace("**", "^{")
spt_pi_set = pi_set.split("| ")
for i in range(len(spt_pi_set)):
pi_expr = spt_pi_set[i]
pi_expr = pi_expr.replace(f"pi", f"\pi_", 1)
pi = list(pi_expr)
open_bracket = False
for j in range(len(pi)):
if pi[j] == "{":
open_bracket = True
if pi[j] == "*" and open_bracket:
pi[j] = "}"
open_bracket = False
pi_expr = "".join(pi)
pi_expr = pi_expr.replace("}", "}\\ \cdot \\ ")
pi_expr = pi_expr.replace("*", "\\ \cdot \\ ")
if open_bracket:
pi_expr += "}"
pi_expr = pi_expr.replace("=", "\\ = \\")
spt_pi_set[i] = pi_expr
card_text_children = []
str_latex = r"$"
for pi in spt_pi_set:
str_latex += pi + r"\\"
card_text_children.append(widgets.HTMLMath(str_latex + "$"))
return card_text_children
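# Illustrative sketch (pi expressions assumed): with math=True, a set string such as
#     "pi1 = x**2*y | pi2 = y*z"
# is rendered by a single HTMLMath widget containing roughly
#     $\pi_1 \ = \ x^{2}\ \cdot \ y \\ \pi_2 \ = \ y\ \cdot \ z \\$
# while math=False only spaces out the '*' operators and returns one v.Html row per pi number.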
def update_current_set():
"""
Returns Shows the current selected pi set to the user in current_set Card
-------
"""
global CHOSEN_PI_LIST
out_set = pif.pi_list_to_str(CHOSEN_PI_LIST)
if out_set:
current_set.children[0].children = [TL[52]]
current_set.color = "green lighten-3"
else:
current_set.children[0].children = [TL[53]]
current_set.color = "grey lighten-3"
current_set.children[1].children = pi_set_html(out_set)
def del_item(widget, event, data):
"""
Returns Deletes the selected parameter from the sheet data table
-------
"""
if sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(len(sheet.items)):
if sheet.items[i]['name'] == item_name:
if i == len(sheet.items):
sheet.items = sheet.items[:-1]
else:
sheet.items = sheet.items[0:i] + sheet.items[i + 1:]
break
def del_all(widget, event, data):
"""
Returns Deletes all parameters from the sheet data table
-------
"""
sheet.items = []
def up_item(widget, event, data):
"""
Returns Moves up the selected parameter in the sheet data table
-------
"""
l = len(sheet.items)
if l >= 2 and sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(1, l):
if sheet.items[i]['name'] == item_name:
if i == l:
sheet.items = sheet.items[0:i - 1] + [sheet.items[i]] + [sheet.items[i - 1]]
else:
sheet.items = sheet.items[0:i - 1] + [sheet.items[i]] + [sheet.items[i - 1]] + sheet.items[i + 1:]
break
def down_item(widget, event, data):
"""
Returns Moves down the selected parameter in the sheet data table
-------
"""
l = len(sheet.items)
if l >= 2 and sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(0, l - 1):
if sheet.items[i]['name'] == item_name:
if i == l - 1:
sheet.items = sheet.items[0:i] + [sheet.items[i + 1]] + [sheet.items[i]]
else:
sheet.items = sheet.items[0:i] + [sheet.items[i + 1]] + [sheet.items[i]] + sheet.items[i + 2:]
break
def set_as_out(widget, event, data):
"""
Returns Sets the selected parameter as output in the sheet data table
-------
"""
l = len(sheet.items)
if l > 0 and sheet.v_model:
item_name = sheet.v_model[0]['name']
for i in range(0, l):
if sheet.items[i]['name'] == item_name:
if sheet.items[i]['in/out'] == 'Input':
if sheet.items[i]['lower bound'] is None or sheet.items[i]['lower bound'] == "":
const_alert.value = True
else:
sheet.items = sheet.items[0:i] + [{"name": sheet.items[i]["name"],
"description": sheet.items[i]["description"],
"unit": sheet.items[i]["unit"],
"upper bound": sheet.items[i]["upper bound"],
"lower bound": sheet.items[i]["lower bound"],
'in/out': 'Output'}] + sheet.items[i + 1:]
else:
sheet.items = sheet.items[0:i] + [{"name": sheet.items[i]["name"],
"description": sheet.items[i]["description"],
"unit": sheet.items[i]["unit"],
"upper bound": sheet.items[i]["upper bound"],
"lower bound": sheet.items[i]["lower bound"],
'in/out': 'Input'}] + sheet.items[i + 1:]
break
def error_end(widget, event, data):
"""
Parameters
----------
widget Current widget
Returns Hides the error messages on the current widget
-------
"""
widget.error_messages = ""
def pint_link(widget, event, data):
"""
Returns Opens browser to a page with all pint base units
-------
"""
webbrowser.open_new(r"https://raw.githubusercontent.com/hgrecco/pint/master/pint/default_en.txt")
def new_log(log, success: bool):
"""
Parameters
----------
    log The string to be shown in the logs field
success If true, the log will be displayed in green (in red if False)
Returns Replaces previous log with current log in the logs field
-------
"""
if success:
logs_card.class_ = logs_card.class_ + "; green--text"
logs_card.children = [v.Html(tag='div', children=[log], class_="text-left py-2 px-2")]
else:
logs_card.class_ = logs_card.class_ + "; red--text"
logs_card.children = [v.Html(tag='div', children=[log], class_="text-left py-2 px-2")]
def choose_dir(widget, event, data):
"""
Returns Opens the dialog_dir dialog box and initializes it
-------
"""
global WORKDIR
dialog_dir.children[0].children[1].children = ["Current work directory: " + WORKDIR]
dialog_dir.v_model = True
def hide_dir(chooser):
"""
Returns Effectively changes the current work directory (WORKDIR) and closes the dialog_dir dialog box
-------
"""
global WORKDIR
old_workdir = WORKDIR
spl.add_temp(old_workdir)
WORKDIR = fc_dir.selected
spl.move_temp(old_workdir, WORKDIR)
dialog_dir.v_model = False
new_log(f"Work directory: {WORKDIR}", True)
dir_btn.color = "green"
time.sleep(0.5)
dir_btn.color = "default"
def save(widget, event, data):
"""
Parameters
----------
    widget The save button in the toolbar
Returns Creates a new pyVPLM save in the work directory with a default name containing date and time
-------
"""
widget.disabled = True
global WORKDIR
now = datetime.now()
dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
file_path = WORKDIR + "\pyVPLM_" + dt_string + ".txt"
widget.disabled = True
widget.loading = True
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number'] is not None:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
else:
pi_set_nb = 0
force_state = force_buck_btn.children == [TL[11]]
tab2_state = [check1.v_model, check2.v_model, check3.v_model, force_state, pi_set_nb]
result = [[header["text"] for header in result_data.headers], result_data.items]
doe_params = [select_DOE.v_model, select_log.v_model, anticipated_mo_entry.v_model]
reg_state = [select_pi0.v_model, select_reg_criteria.v_model, model_order_entry.v_model, select_reg_type.v_model,
nb_terms_slider.v_model]
sl.save(file_path, sheet.items, buck_area.v_model, force_area.v_model, auto_buck_table.items, tab2_state,
PHYSICAL_PARAMS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST, phy_const_area.v_model,
pi_const_area.v_model, doe_params, DOE, result, threshold_slider.v_model, DEPENDENCY_CHECK_STATE,
REGRESSION_PI_LIST, reg_state, MODELS)
    widget.disabled = False
    widget.loading = False
new_log(f"Saved at: {file_path}", True)
widget.color = "green"
time.sleep(0.5)
widget.color = "default"
def save_as(widget, event, data):
"""
Returns Shows the save dialog
-------
"""
global WORKDIR
dialog.children[0].children[1].children = ["Current work directory: " + WORKDIR]
dialog.v_model = True
def hide_save_as(widget, event, data):
"""
Parameters
----------
widget The OK button in the save dialog
Returns Saves a .txt file with all current user input to the specified path and hides the save dialog
-------
"""
global WORKDIR
save_as_tf.error_messages = ""
if save_as_tf.v_model.strip():
file_path = WORKDIR + "\\" + save_as_tf.v_model + ".txt"
widget.disabled = True
widget.loading = True
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number'] is not None:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
else:
pi_set_nb = 0
force_state = force_buck_btn.children == [TL[11]]
tab2_state = [check1.v_model, check2.v_model, check3.v_model, force_state, pi_set_nb]
result = [[header["text"] for header in result_data.headers], result_data.items]
doe_params = [select_DOE.v_model, select_log.v_model, anticipated_mo_entry.v_model]
reg_state = [select_pi0.v_model, select_reg_criteria.v_model, model_order_entry.v_model,
select_reg_type.v_model, nb_terms_slider.v_model]
sl.save(file_path, sheet.items, buck_area.v_model, force_area.v_model, auto_buck_table.items, tab2_state,
PHYSICAL_PARAMS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST, phy_const_area.v_model,
pi_const_area.v_model, doe_params, DOE, result, threshold_slider.v_model, DEPENDENCY_CHECK_STATE,
REGRESSION_PI_LIST, reg_state, MODELS)
dialog.v_model = False
widget.disabled = False
widget.loading = False
new_log(f"Saved at: {file_path}", True)
save_as_btn.color = "green"
time.sleep(0.5)
save_as_btn.color = "default"
else:
save_as_tf.error_messages = "please specify a file name"
def save_plots(widget, event, data):
"""
Parameters
----------
widget The save all plots button from the toolbar
    Returns Saves all the plots from the temp directory into the work directory with default names containing date and time
-------
"""
try:
spl.save_all_plots(WORKDIR)
new_log(f"All plots saved at: {WORKDIR}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
except FileNotFoundError:
new_log(f"No plots to save", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
def load(widget, event, data):
"""
Returns Shows the load dialog
-------
"""
global WORKDIR
fc_load.default_path = WORKDIR
dialog2.v_model = True
def hide_ld(chooser):
"""
Parameters
----------
    chooser The file chooser used to select the save file in the load dialog
Returns Loads a .txt file and modifies the state of all widgets accordingly, hides the load dialog
-------
"""
file_path = fc_load.selected
if file_path:
global OLD_PHYSICAL_PARAMS, PHYSICAL_PARAMS, OUTPUTS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST,\
RESULT_DF, RESULT_PI, DEPENDENCY_CHECK_STATE, REGRESSION_PI_LIST, MODELS
try:
load_tuple = sl.load(file_path)
except FileNotFoundError:
fc_load.reset()
dialog2.v_model = False
new_log(f"Failed to load, file does not exist", False)
load_btn.color = "red"
time.sleep(0.5)
load_btn.color = "default"
return -1
if len(load_tuple) != 20:
fc_load.reset()
dialog2.v_model = False
new_log(f"Failed to load, invalid file", False)
load_btn.color = "red"
time.sleep(0.5)
load_btn.color = "default"
return -1
dialog2.v_model = False
fc_load.reset()
load_btn.color = "green"
new_log(f"Loaded: {file_path}", True)
sheet.items = load_tuple[0]
buck_area.v_model = load_tuple[1]
force_area.v_model = load_tuple[2]
auto_buck_table.items = load_tuple[3]
tab2_state = load_tuple[4]
PHYSICAL_PARAMS = load_tuple[5]
OLD_PHYSICAL_PARAMS = load_tuple[5]
OUTPUTS = load_tuple[6]
PI_SETS = load_tuple[7]
CHOSEN_PI_SET = load_tuple[8]
PI_LISTS = load_tuple[9]
CHOSEN_PI_LIST = load_tuple[10]
update_current_set()
check1.v_model = tab2_state[0]
check2.v_model = tab2_state[1]
check3.v_model = tab2_state[2]
if tab2_state[3]:
force_area.error_messages = ""
force_area.success_messages = ""
check2.disabled = True
check2.v_model = False
force_area.readonly = False
force_area.clearable = True
force_area.background_color = "white"
force_eq.disabled = False
force_eq.background_color = "white"
add_pi_btn.disabled = False
if auto_buck_table.v_model:
force_copy_btn.disabled = False
force_buck_btn.children = [TL[11]]
else:
force_area.error_messages = ""
force_area.success_messages = TL[18]
check2.disabled = False
force_area.readonly = True
force_area.clearable = False
force_area.background_color = "grey lighten-3"
force_eq.disabled = True
force_eq.v_model = ""
force_eq.background_color = "grey lighten-3"
add_pi_btn.disabled = True
force_copy_btn.disabled = True
force_buck_btn.children = [TL[14]]
if tab2_state[4] == 0:
check3.disabled = True
else:
check3.disabled = False
setattr(auto_buck_table, 'v_model', [auto_buck_table.items[tab2_state[4] - 1]])
anticipated_mo_entry.v_model = load_tuple[12][2]
change_tab_3()
phy_const_area.v_model = load_tuple[11][0]
pi_const_area.v_model = load_tuple[11][1]
select_DOE.v_model = load_tuple[12][0]
select_log.v_model = load_tuple[12][1]
does = load_tuple[13]
if does:
doeX, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active = does
reduced_parameter_set, reduced_pi_set = PHYSICAL_PARAMS, CHOSEN_PI_SET
for out in list(PHYSICAL_PARAMS.dictionary.keys())[-OUTPUTS:]:
reduced_parameter_set, reduced_pi_set = vpl.reduce_parameter_set(reduced_parameter_set,
reduced_pi_set,
elected_output=out)
init_doe_plots(doeX, reduced_parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active,
reduced_pi_set)
if len(doe_box.children) == 3:
doe_box.children = list(doe_box.children) + [exp_panel_doe]
result_headers, result_items = load_tuple[14]
result_data.headers = csv.format_headers(result_headers)
result_data.items = result_items
if result_items:
RESULT_DF = pd.DataFrame(result_items)
func_x_to_pi = vpl.declare_func_x_to_pi(PHYSICAL_PARAMS, CHOSEN_PI_SET)
ordered_columns = []
for key in PHYSICAL_PARAMS.dictionary:
ordered_columns.append(f"{key} [{PHYSICAL_PARAMS.dictionary[key].defined_units}]")
re_ordered_result = RESULT_DF[ordered_columns]
RESULT_PI = func_x_to_pi(re_ordered_result.to_numpy(dtype=float))
threshold_slider.v_model = load_tuple[15]
DEPENDENCY_CHECK_STATE = load_tuple[16]
REGRESSION_PI_LIST = load_tuple[17]
reg_state = load_tuple[18]
if reg_state:
select_pi0.v_model = reg_state[0]
select_reg_criteria.v_model = reg_state[1]
model_order_entry.v_model = int(reg_state[2])
select_reg_type.v_model = reg_state[3]
MODELS = load_tuple[19]
if MODELS:
regression_models(models_btn, 0, 0, slider_state=int(reg_state[4]))
if tabs.v_model == 5:
change_tab_5()
if tabs.v_model == 6:
change_tab_6()
time.sleep(0.5)
load_btn.color = "default"
else:
dialog2.v_model = False
# --------- Buckingham Tab Functions -----------------------------------------------------------------------------------
def add_pi(widget, event, data):
"""
Returns Adds the pi number specified in force_eq to force_area
-------
"""
index = pif.get_pi_index(force_area.v_model)
if force_eq.v_model is None or force_eq.v_model == "":
force_eq.error_messages = TL[21]
else:
exp = pif.format_input(force_eq.v_model, index)
if force_area.v_model is not None:
force_area.v_model += exp + "\n"
else:
force_area.v_model = exp + "\n"
force_eq.v_model = ""
def tab2_reload():
"""
Returns Reloads Buckingham Theorem Tab
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST, PI_SETS, PI_LISTS
CHOSEN_PI_SET = None
PI_SETS = [None, None, []]
CHOSEN_PI_LIST = []
PI_LISTS = [[], [], []]
update_current_set()
buck_area.v_model = ""
check1.v_model = True
force_buck_btn.disabled = False
force_buck_btn.children = [TL[11]]
force_eq.v_model = ""
force_eq.error_messages = ""
force_area.v_model = ""
force_area.success_messages = ""
force_area.error_messages = ""
force_area.readonly = False
force_area.clearable = True
add_pi_btn.disabled = False
force_copy_btn.disabled = False
check2.disabled = True
check2.v_model = False
auto_buck_btn.disabled = False
auto_buck_table.items = []
check3.disabled = True
check3.v_model = False
def tab2_disable():
"""
Returns Disables Buckingham Theorem Tab
-------
"""
force_buck_btn.disabled = True
auto_buck_btn.disabled = True
check1.disabled = True
check1.v_model = False
def tab2_enable():
"""
Returns Enables Buckingham Theorem Tab
-------
"""
force_buck_btn.disabled = False
auto_buck_btn.disabled = False
check1.disabled = False
# -----DOE Tab functions------------------------------------------------------------------------------------------------
def add_phy_const(widget, event, data):
"""
Returns Adds a physical constraint from the text field to the text area
-------
"""
phy_const_entry.error_messages = ""
if phy_const_entry.v_model is None or phy_const_entry.v_model == "":
phy_const_entry.error_messages = TL[21]
else:
exp = phy_const_entry.v_model
if phy_const_area.v_model is not None:
phy_const_area.v_model += exp + "\n"
else:
phy_const_area.v_model = exp + "\n"
phy_const_entry.v_model = ""
def add_pi_const(widget, event, data):
"""
Returns Adds a pi constraint from the text field to the text area
-------
"""
pi_const_entry.error_messages = ""
if pi_const_entry.v_model is None or pi_const_entry.v_model == "":
pi_const_entry.error_messages = TL[21]
else:
exp = pi_const_entry.v_model
if pi_const_area.v_model is not None:
pi_const_area.v_model += exp + "\n"
else:
pi_const_area.v_model = exp + "\n"
pi_const_entry.v_model = ""
def nb_of_terms():
"""
Returns The maximum number of terms for the given model order and the amount of input pi numbers
-------
"""
n = int(anticipated_mo_entry.v_model)
p = len(CHOSEN_PI_LIST) - OUTPUTS
return noc.coefficient_nb(n, p, approx=(p >= 2*n and n > 10))
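# Background note (assumption, not stated in the original code): for a full polynomial model of
# order n in p input pi numbers, the number of monomial terms is the binomial coefficient
# C(n + p, n); noc.coefficient_nb is assumed to compute (or approximate, when approx=True)
# that count. For example, n = 2 and p = 3 would give C(5, 2) = 10 candidate terms.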
def mo_to_size(widget, event, data):
"""
Parameters
----------
widget Anticipated model order field
Returns Sets the default wished size to 10x max number of terms
-------
"""
nb_terms = nb_of_terms()
wished_size_entry.v_model = DOE_MULTIPLIER * nb_terms
model_order_entry.v_model = widget.v_model
widget.messages = ""
wished_size_entry.messages = ""
def check_size(widget, event, data):
"""
Returns Checks if the wished size is not too low or too high compared to the default wished size and shows warnings
-------
"""
expected = DOE_MULTIPLIER * nb_of_terms()
if int(wished_size_entry.v_model) > int(2*expected) or\
int(0.5 * expected) > int(wished_size_entry.v_model) >= int(expected/DOE_MULTIPLIER):
wished_size_entry.messages = "Warning: size not advised for model order"
anticipated_mo_entry.messages = "Warning: size not advised for model order"
elif int(wished_size_entry.v_model) < int(expected/DOE_MULTIPLIER):
wished_size_entry.messages = "Warning: size too low for model order, model computation will fail"
anticipated_mo_entry.messages = "Warning: size too low for model order, model computation will fail"
else:
wished_size_entry.messages = ""
anticipated_mo_entry.messages = ""
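# Worked example (assuming DOE_MULTIPLIER == 10, as the mo_to_size docstring suggests):
# if nb_of_terms() == 15, then expected == 150 and the checks above behave as follows:
#   size > 300 or 15 <= size < 75  -> "size not advised" warning on both fields
#   size < 15                      -> "size too low" warning (model computation will fail)
#   otherwise                      -> both warning messages are cleared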
def gen_doe(widget, event, data):
"""
Returns Displays the generate DOE dialog box and initializes it
-------
"""
global WORKDIR
dialog3.v_model = True
dialog3.children[0].children[1].children = ["Current work directory: " + WORKDIR]
now = datetime.now()
dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
doe_tf.v_model = "pyVPLM_" + dt_string
def customize_2d_plot(widget, event, data):
"""
Parameters
----------
widget The current range slider or one of the two selection fields (for the axis)
Returns
-------
"""
global AX, TOTAL_DOE
widget.loading = True
new_df = TOTAL_DOE
i = 0
for col in new_df:
[col_min, col_max] = range_sliders.children[2*i + 1].v_model
new_df = new_df[(new_df[col] >= col_min) & (new_df[col] <= col_max)]
i += 1
with customizable_2d_plot_output:
clear_output(wait=True)
AX.clear()
AX.set_xlabel(select_2d_x.v_model)
AX.set_ylabel(select_2d_y.v_model)
AX.plot(new_df[select_2d_x.v_model], new_df[select_2d_y.v_model], 'o')
display(AX.figure)
widget.loading = False
def init_doe_plots(doeX, parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active, pi_set, log=True):
"""
Parameters
----------
doeX numpy array with the DOE of physical parameters
parameter_set PositiveParameterSet with all input physical parameters
doePi numpy array with the DOE of pi numbers (elected points)
doePi_all numpy array with the DOE of pi numbers (all points)
doePi_nearest numpy array with the DOE of pi numbers (3 nearest from objective points)
doePi_all_obj numpy array with the DOE of pi numbers (all objective points)
doePI_active numpy array with the DOE of pi numbers (active objective points)
pi_set PositiveParameterSet with all input pi numbers
log Toggles display in log space for all plots
Returns Initializes all DOE plots
-------
"""
spl.add_temp(WORKDIR)
_, _, ww, _ = GetWindowRect(GetForegroundWindow())
error = False
if log:
doeX = np.log10(doeX)
doePi = np.log10(doePi)
doePi_all = np.log10(doePi_all)
doePi_nearest = np.log10(doePi_nearest)
doePi_all_obj = np.log10(doePi_all_obj)
doePI_active = np.log10(doePI_active)
columns = []
constants = []
for key in parameter_set.dictionary:
if log:
column_name = f"log10({key}) [{parameter_set.dictionary[key].defined_units}]"
else:
column_name = f"{key} [{parameter_set.dictionary[key].defined_units}]"
columns.append(column_name)
if len(parameter_set.dictionary[key].defined_bounds) == 0:
constants.append(column_name)
df = pd.DataFrame(data=doeX, columns=columns)
df = df.drop(labels=constants, axis=1)
phy_scatter_matrix_output.clear_output()
with phy_scatter_matrix_output:
try:
plt.rcParams['axes.labelsize'] = 14
sm1 = scatter_matrix(df, figsize=(30*ww/1928, 30*ww/1928), alpha=0.9, diagonal="kde")
for i in range(np.shape(sm1)[0]):
for j in range(np.shape(sm1)[1]):
if i < j:
sm1[i, j].set_visible(False)
elif i == j:
x_ = sm1[i, j].lines[0].get_xdata()
y_ = sm1[i, j].lines[0].get_ydata()
                    sm1[i, j].fill_between(x_, y_, alpha=0.54) # little reference
try:
plt.savefig(WORKDIR + "\\temp\\phy_scatter_matrix.pdf")
except Exception:
new_log("Failed to save phy_scatter_matrix.pdf in \\temp", False)
plt.show()
except ValueError:
error = True
columns_2 = []
for key in pi_set.dictionary:
if log:
columns_2.append("log10(" + key + ")")
else:
columns_2.append(key)
df_2 = pd.DataFrame(data=doePi, columns=columns_2)
constant_pi = cpi.get_constant_pi(df_2)
df_2 = df_2.drop(labels=constant_pi, axis=1)
df_2_1 = | pd.DataFrame(data=doePi_all, columns=columns_2) | pandas.DataFrame |
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import unittest
import tempfile
import os
import pandas as pd
import random
import pytest
from coremltools.models.utils import (
evaluate_classifier,
evaluate_classifier_with_probabilities,
_macos_version,
_is_macos,
)
from coremltools._deps import (
_HAS_LIBSVM,
MSG_LIBSVM_NOT_FOUND,
_HAS_SKLEARN,
MSG_SKLEARN_NOT_FOUND,
)
if _HAS_LIBSVM:
from libsvm import svm, svmutil
from svmutil import svm_train, svm_predict
from libsvm import svmutil
from coremltools.converters import libsvm
if _HAS_SKLEARN:
from sklearn.svm import NuSVC
from sklearn.preprocessing import OneHotEncoder
from coremltools.converters import sklearn as scikit_converter
@unittest.skipIf(not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)
class NuSvcScikitTest(unittest.TestCase):
"""
Unit test class for testing scikit-learn converter.
"""
def _evaluation_test_helper(
self,
class_labels,
use_probability_estimates,
allow_slow,
allowed_prob_delta=0.00001,
):
# Parameters to test
kernel_parameters = [
{},
{"kernel": "rbf", "gamma": 1.2},
{"kernel": "linear"},
{"kernel": "poly"},
{"kernel": "poly", "degree": 2},
{"kernel": "poly", "gamma": 0.75},
{"kernel": "poly", "degree": 0, "gamma": 0.9, "coef0": 2},
{"kernel": "sigmoid"},
{"kernel": "sigmoid", "gamma": 1.3},
{"kernel": "sigmoid", "coef0": 0.8},
{"kernel": "sigmoid", "coef0": 0.8, "gamma": 0.5},
]
non_kernel_parameters = [
{},
{"nu": 0.75},
{"nu": 0.25, "shrinking": True},
{"shrinking": False},
]
# Generate some random data
x, y = [], []
random.seed(42)
for _ in range(50):
x.append(
[random.gauss(200, 30), random.gauss(-100, 22), random.gauss(100, 42)]
)
y.append(random.choice(class_labels))
column_names = ["x1", "x2", "x3"]
# make sure first label is seen first, second is seen second, and so on.
for i, val in enumerate(class_labels):
y[i] = val
df = | pd.DataFrame(x, columns=column_names) | pandas.DataFrame |
"""Tests for encodings submodule."""
from nxviz import encodings as aes
import pytest
import pandas as pd
from random import choice
import numpy as np
def categorical_series():
"""Generator for categorical series."""
categories = "abc"
return pd.Series([choice(categories) for _ in range(30)])
def continuous_series():
"""Generator for continuous-valued series."""
values = np.linspace(0, 2, 100)
return pd.Series(values)
def ordinal_series():
"""Generator for an ordinal series."""
values = [1, 2, 3, 4]
return | pd.Series(values) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gen_sgRNAs.py generates sgRNAs as part of ExcisionFinder. New Cas enzymes can be added by modifying CAS_LIST.txt.
Written in Python v 3.6.1.
<NAME> et al 2018.
Usage:
gen_sgRNAs.py [-chvrd] <bcf> <annots_file> <locus> <pams_dir> <ref_fasta> <out> <cas_types> <guide_length> [<gene_vars>] [--crispor=<ref_gen>] [--hom] [--bed] [--max_indel=<S>] [--strict]
gen_sgRNAs.py [-chvrd] <locus> <pams_dir> <ref_fasta> <out> <cas_types> <guide_length> [<gene_vars>] [--crispor=<ref_gen>] [--hom] [--bed] [--max_indel=<S>] --ref_guides [--strict]
gen_sgRNAs.py -C | --cas-list
Arguments:
bcf BCF/VCF file with genotypes.
    annots_file Annotated variants file indicating whether each variant generates an allele-specific sgRNA site.
locus Locus of interest in format chrom:start-stop. Put filepath to BED file here if '--bed'.
pams_dir Directory where pam locations in the reference genome are located.
ref_genome_fasta Fasta file for reference genome used, e.g. hg38.
out Directory in which to save the output files.
cas_types Cas types you would like to analyze, comma-separated (e.g. SpCas9,SaCas9).
guide_length Guide length, commonly 20 bp, comma-separated if different for different cas types.
Options:
gene_vars Optional. 1KGP originating file to add rsID and allele frequency (AF) data to variants.
-h --help Show this screen and exit.
-c Do not take the reverse complement of the guide sequence for '-' stranded guides (when the PAM is on the 5' end).
-v Run in verbose mode (especially useful for debugging, but also for knowing status of script)
    --hom Use 'homozygous' mode, personalized sgRNA design. Do not use if ref_guides is specified; the two options are redundant and incompatible.
--crispor=<ref_gen> Add CRISPOR specificity scores to outputted guides. From Haeussler et al. Genome Biology 2016.
Equals directory name of reference genome (complete).
--bed Design sgRNAs for multiple regions specified in a BED file.
--max_indel=<S> Maximum size for INDELS. Must be smaller than guide_length [default: 5].
-r Return guides as RNA sequences rather than DNA sequences.
-d Return dummy guides (all --- as opposed to GGG or CCC) for variants without a PAM, e.g. when variant makes or breaks a PAM.
-C --cas-list List available cas types and exits.
--ref_guides Design guides for reference genome, ignoring variants in region.
--strict Only design allele-specific guides where the variant makes or breaks a PAM site.
"""
import pandas as pd
import numpy as np
from docopt import docopt
import os
import cas_object
from pyfaidx import Fasta
from collections import Counter
import regex
import re
from Bio import SeqIO
import subprocess
from io import StringIO
import logging
__version__ = "0.0.1"
REQUIRED_BCFTOOLS_VER = "1.5"
# COLUMN_ORDER=['chrom','variant_position','ref','alt','gRNA_ref','gRNA_alt',
# 'variant_position_in_guide','start','stop','strand','cas_type','guide_id','rsID','AF']
# get rid of annoying false positive Pandas error
pd.options.mode.chained_assignment = None
def find_spec_pams(cas_obj, python_string, orient):
# orient specifies whether this is a 3prime PAM (e.g. Cas9, PAM seq 3' of sgRNA)
# or a 5prime PAM (e.g. cpf1, PAM 5' of sgRNA)
# get sequence
sequence = python_string
# get PAM sites (the five prime three prime thing will need to be reversed for cpf1)
def get_pam_fiveprime(pam_regex, sequence):
starts = []
for pam in regex.finditer(
pam_regex, sequence, regex.IGNORECASE, overlapped=True
):
starts.append(pam.start())
return starts
def get_pam_threeprime(pam_regex, sequence):
starts = []
for pam in regex.finditer(
pam_regex, sequence, regex.IGNORECASE, overlapped=True
):
starts.append(pam.end())
return starts
if orient == "3'":
for_starts = get_pam_fiveprime(cas_obj.forwardPam_regex(), sequence)
rev_starts = get_pam_threeprime(cas_obj.reversePam_regex(), sequence)
elif orient == "5'":
for_starts = get_pam_threeprime(cas_obj.forwardPam_regex(), sequence)
rev_starts = get_pam_fiveprime(cas_obj.reversePam_regex(), sequence)
return (for_starts, rev_starts)
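# Illustrative sketch (assumptions: a Cas9-like enzyme whose forward PAM regex is "[ACGT]GG"
# and reverse PAM regex is "CC[ACGT]"): with orient="3'", find_spec_pams records the start
# position of every forward-strand NGG match and the end position of every reverse-strand
# CCN match, returning the two position lists as (for_starts, rev_starts).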
def het(genotype):
# if genotype == '.':
# return False
if ':' in genotype:
gen1, gen2 = re.split("/|\|", genotype.split(':')[0])
else:
gen1, gen2 = re.split("/|\|", genotype)
return gen1 != gen2
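# Examples (illustrative only): het("0/1") and het("0|1") return True, het("1/1") returns
# False, and a VCF-style sample field such as "0/1:35" is first split on ":" so only the
# genotype part "0/1" is compared.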
def check_bcftools():
"""
Checks bcftools version, and exits the program if the version is incorrect
"""
version = (
subprocess.run(
"bcftools -v | head -1 | cut -d ' ' -f2", shell=True, stdout=subprocess.PIPE
)
.stdout.decode("utf-8")
.rstrip()
)
if float(version) >= float(REQUIRED_BCFTOOLS_VER):
logging.info(f"bcftools version {version} running")
else:
logging.error(
f"Error: bcftools must be >={REQUIRED_BCFTOOLS_VER}. Current version: {version}"
)
exit(1)
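# Illustrative note: `bcftools -v | head -1` prints a line such as "bcftools 1.9", so the
# shell pipeline above extracts "1.9" and the float comparison accepts it (1.9 >= 1.5).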
def get_alt_seq(
chrom,
pam_start,
var_pos,
ref,
alt,
guide_length,
ref_genome,
strand="positive",
var_type="near_pam",
):
chrom = chrom.replace("chr", "")
if strand == "positive":
if var_type == "near_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start - guide_length - 1 : pam_start - 1
]
# alt sgRNA
alt_seq = (
ref_genome["chr" + str(chrom)][
pam_start - guide_length - len(alt) : var_pos - 1
].lower()
+ alt.upper()
+ ref_genome["chr" + str(chrom)][
var_pos: pam_start - 1
].lower()
)
elif var_type == "destroys_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start - guide_length - 1 : pam_start - 1
]
# in this case, variant is destroying a PAM, rendering the alternate allele no longer a CRISPR site
# therefore, for lack of a better solution, return empty alt_seq
alt_seq = "G" * guide_length
elif var_type == "makes_pam": # this might break with indels
# reference sgRNA
ref_seq = "G" * guide_length
            # in this case, the variant creates a PAM, so the reference allele is not a CRISPR site
# therefore, for lack of a better solution, return empty ref_seq
alt_seq = ref_genome["chr" + str(chrom)][
pam_start - guide_length : pam_start
]
return ref_seq.upper(), alt_seq.upper()
elif strand == "negative":
if var_type == "near_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start : pam_start + guide_length
]
# alt sgRNA
alt_seq = (
ref_genome["chr" + str(chrom)][pam_start : var_pos - 1].lower()
+ alt.upper()
+ ref_genome["chr" + str(chrom)][
var_pos : pam_start + guide_length - len(alt) + 1
].lower()
)
elif var_type == "destroys_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start : pam_start + guide_length
]
# in this case, variant is destroying a PAM, rendering the alternate allele no longer a CRISPR site
# therefore, for lack of a better solution, return empty alt_seq
alt_seq = "G" * guide_length
elif var_type == "makes_pam": # this might break with indels
# reference sgRNA
ref_seq = "G" * guide_length
alt_seq = ref_genome["chr" + str(chrom)][
pam_start : pam_start + guide_length
]
return ref_seq.upper(), alt_seq.upper()
else:
logging.info("Must specify strand.")
exit(1)
def make_rev_comp(s):
"""
Generates reverse comp sequences from an input sequence.
"""
return s[::-1].translate(s[::-1].maketrans("ACGT", "TGCA"))
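# Example (illustrative only): make_rev_comp("ATGC") returns "GCAT": the sequence is
# reversed and each base is complemented (A<->T, C<->G).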
def get_crispor_scores(out_df, outdir, ref_gen):
guide_seqs_ref = [">ref_guide_seqs\n"]
guide_seqs_alt = [">alt_guide_seqs\n"]
for index, row in out_df.iterrows():
guide_seqs_ref.append(
row["gRNA_ref"] + "GGGNN\n"
) # the NN splits things up for CRISPOR
guide_seqs_alt.append(row["gRNA_alt"] + "GGGNN\n")
with open("ref_seqs_nosave.fa", "w") as f:
for seq in guide_seqs_ref:
f.write(seq)
with open("alt_seqs_nosave.fa", "w") as f:
for seq in guide_seqs_alt:
f.write(seq)
# get script dir
scriptsdir = os.path.join(os.path.dirname(__file__), "crispor")
run_name = os.path.join(
scriptsdir, f"crispor.py --skipAlign --noEffScores -g {ref_gen} {ref_gen}"
)
print("Running crispor.")
# error_out = os.path.join(outdir, 'crispor_error.txt')
error_out = os.path.join(os.path.dirname(outdir), "crispor_error.txt")
command = f"source activate crispor; \
python2 {run_name} ref_seqs_nosave.fa nosave_ref_scores.tsv &> {error_out};\
python2 {run_name} alt_seqs_nosave.fa nosave_alt_scores.tsv &> {error_out};\
source deactivate crispor"
subprocess.run(command, shell=True)
print("crispor done")
# subprocess.run('source deactivate crispor', shell=True)
# remove seq files
os.remove("ref_seqs_nosave.fa")
os.remove("alt_seqs_nosave.fa")
# grab scores from files outputted from CRISPOR
score_dir_ref = pd.read_csv(
"nosave_ref_scores.tsv",
sep="\t",
header=None,
names=[
"seqId",
"guideId",
"targetSeq",
"mitSpecScore",
"offtargetCount",
"targetGenomeGeneLocus",
],
)
score_dir_alt = pd.read_csv(
"nosave_alt_scores.tsv",
sep="\t",
header=None,
names=[
"seqId",
"guideId",
"targetSeq",
"mitSpecScore",
"offtargetCount",
"targetGenomeGeneLocus",
],
)
# remove original score files
os.remove("nosave_ref_scores.tsv")
os.remove("nosave_alt_scores.tsv")
# merge score info with original out_df
merge_df_ref = pd.DataFrame()
merge_df_ref["scores_ref"] = score_dir_ref["mitSpecScore"]
merge_df_ref["offtargcount_ref"] = score_dir_ref["offtargetCount"]
merge_df_ref["gRNA_ref"] = score_dir_ref["targetSeq"].str[
:-3
] # get rid of added on PAM site
merge_df_alt = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 16:51:36 2021
@author: FELIPE
"""
# Note: Read the header before running
# =============================================================================
# >>> Project: Disaster Response Pipeline (Udacity - Data Science Nanodegree) <<<
# How to execute this file
# Sample Script Syntax:
# > python process_data.py <path to messages csv file> <path to categories csv file> <path to sqllite destination db>
# Sample script execution:
# > python process_data.py disaster_messages.csv disaster_categories.csv disaster_response_db.db
# =============================================================================
# ETL PIPELINE PREPARATION
# Loading libraries
import pandas as pd
from sqlalchemy import create_engine
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
# ============================================================================
# Module to load messages
def load_mess_categ(dataset_messages, dataset_category):
"""
    >>> This loads the messages dataset together with the categories dataset
>>> Function arguments:
- dataset_messages: Path to the csv file containing messages
- dataset_category: Path to the csv file containing categories
>>> Function output:
df: Merged dataset with the messages and categories datasets
"""
messages_dataset = | pd.read_csv(dataset_messages) | pandas.read_csv |
"""
Functions that plot the results from the simulated experiments.
@author: <NAME> <<EMAIL>>
"""
import seaborn as sns
import matplotlib
matplotlib.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
import pandas as pd
"""
Global dictionaries used to store LaTeX formatting for labels used in plots.
"""
mechanism_name_map = {
#Non-Parametric Mechanisms
"BASELINE: MSE": r'MSE',
"DMI: 4": r'DMI',
"OA: 0": r'OA',
"Phi-DIV: CHI_SQUARED": r'$\Phi$-Div: $\chi^2$',
"Phi-DIV: KL": r'$\Phi$-Div: KL',
"Phi-DIV: SQUARED_HELLINGER": r'$\Phi$-Div: $H^2$',
"Phi-DIV: TVD": r'$\Phi$-Div: TVD',
"PTS: 0": r'PTS',
#Parametric Mechanisms
"MSE_P: 0": r'MSE$_P$',
"Phi-DIV_P: CHI_SQUARED": r'$\Phi$-Div$_P$: $\chi^2$',
"Phi-DIV_P: KL": r'$\Phi$-Div$_P$: KL',
"Phi-DIV_P: SQUARED_HELLINGER": r'$\Phi$-Div$_P$: $H^2$',
"Phi-DIV_P: TVD": r'$\Phi$-Div$_P$: TVD',
}
mechanism_color_map = {
#Non-Parametric Mechanisms
r'MSE': '#a6cee3',
r'DMI': '#d9ef8b',
r'OA': '#b15928',
r'$\Phi$-Div: $\chi^2$': '#b2df8a',
r'$\Phi$-Div: KL': '#fb9a99',
r'$\Phi$-Div: $H^2$': '#cab2d6',
r'$\Phi$-Div: TVD': '#fdbf6f',
r'PTS': '#01665e',
#Parametric Mechanisms
r'MSE$_P$': '#1f78b4',
r'$\Phi$-Div$_P$: $\chi^2$': '#33a02c',
r'$\Phi$-Div$_P$: KL': '#e31a1c',
r'$\Phi$-Div$_P$: $H^2$': '#6a3d9a',
r'$\Phi$-Div$_P$: TVD': '#ff7f00',
}
mechanism_marker_map = {
#Non-Parametric Mechanisms
r'MSE': 'o',
r'DMI': 'o',
r'OA': 'o',
r'$\Phi$-Div: $\chi^2$': 'o',
r'$\Phi$-Div: KL': 'o',
r'$\Phi$-Div: $H^2$': 'o',
r'$\Phi$-Div: TVD': 'o',
r'PTS': 'o',
#Parametric Mechanisms
r'MSE$_P$': 'P',
r'$\Phi$-Div$_P$: $\chi^2$': 'P',
r'$\Phi$-Div$_P$: KL': 'P',
r'$\Phi$-Div$_P$: $H^2$': 'P',
r'$\Phi$-Div$_P$: TVD': 'P',
}
mechanism_dash_map = {
#Non-Parametric Mechanisms
r'MSE': (1, 1),
r'DMI': (1, 1),
r'OA': (1, 1),
r'$\Phi$-Div: $\chi^2$': (1, 1),
r'$\Phi$-Div: KL': (1, 1),
r'$\Phi$-Div: $H^2$': (1, 1),
r'$\Phi$-Div: TVD': (1, 1),
r'PTS': (1, 1),
#Parametric Mechanisms
r'MSE$_P$': (5, 5),
r'$\Phi$-Div$_P$: $\chi^2$': (5, 5),
r'$\Phi$-Div$_P$: KL': (5, 5),
r'$\Phi$-Div$_P$: $H^2$': (5, 5),
r'$\Phi$-Div$_P$: TVD': (5, 5),
}
strategy_color_map = {
"NOISE": '#999999',
"FIX-BIAS": '#f781bf',
"MERGE": '#a65628',
"PRIOR": '#ffff33',
"ALL10": '#ff7f00',
"HEDGE": '#984ea3'
}
strategy_marker_map = {
"NOISE": 's',
"FIX-BIAS": 's',
"MERGE": 's',
"PRIOR": 's',
"ALL10": 's',
"HEDGE": 's'
}
strategy_dash_map = {
"NOISE": (3, 1, 1, 1, 1, 1),
"FIX-BIAS": (3, 1, 1, 1, 1, 1),
"MERGE": (3, 1, 1, 1, 1, 1),
"PRIOR": (3, 1, 1, 1, 1, 1),
"ALL10": (3, 1, 1, 1, 1, 1),
"HEDGE": (3, 1, 1, 1, 1, 1)
}
estimation_procedure_map = {
"Consensus-Grade": r'Consensus Grade',
"Procedure": r'Procedure',
"Procedure-NB": r'Procedure-NB'
}
estimation_procedure_color_map = {
r'Consensus Grade': '#b2df8a',
r'Procedure': '#1f78b4',
r'Procedure-NB': '#a6cee3'
}
def plot_median_auc(results, filename):
"""
Used for Binary Effort setting. Generates a lineplot of the median AUC as the number of active graders varies.
Parameters
----------
results : dict.
{ num: { mechanism: { "Median ROC-AUC": score } } },
where num is the number of active graders (int), mechanism is the name of a mechanism (str, one of the keys of the global mechanism_name_map), and score is a float.
filename : str.
Name of the file used for saving the plot (as a .pdf).
Returns
-------
None.
"""
global mechanism_name_map
formatted_results = {"Number of Active Graders": [],
"Median AUC": [],
"Mechanism": []
}
for key in results.keys():
mechanisms = list(results[key].keys())
for mechanism in mechanisms:
formatted_results["Number of Active Graders"].append(key)
formatted_results["Median AUC"].append(results[key][mechanism]["Median ROC-AUC"])
formatted_results["Mechanism"].append(mechanism_name_map[mechanism])
results_df = pd.DataFrame(data=formatted_results)
_ = sns.lineplot(x="Number of Active Graders", y="Median AUC", hue="Mechanism", style="Mechanism", markers=mechanism_marker_map, dashes=mechanism_dash_map, data=results_df, palette=mechanism_color_map)
plt.tight_layout()
figure_file = "figures/" + filename + ".pdf"
plt.savefig(figure_file, dpi=300)
plt.show()
plt.close()
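# Illustrative input (hypothetical values): plot_median_auc expects a nested dict such as
# {10: {"BASELINE: MSE": {"Median ROC-AUC": 0.71}, "DMI: 4": {"Median ROC-AUC": 0.68}},
#  20: {"BASELINE: MSE": {"Median ROC-AUC": 0.74}, "DMI: 4": {"Median ROC-AUC": 0.72}}}
# where the outer keys are numbers of active graders and the mechanism keys must appear
# in mechanism_name_map.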
def plot_auc_scores(results, filename):
"""
Used for Binary Effort setting. Generates a boxplot of the AUC scores of each mechanism for a fixed number of active graders.
Parameters
----------
results : dict.
{ mechanism: { "ROC-AUC Scores": [ score ] } },
where mechanism is the name of a mechanism (str, one of the keys of the global mechanism_name_map) and each score in the list is a float.
filename : str.
Name of the file used for saving the plot (as a .pdf).
Returns
-------
None.
"""
global mechanism_name_map
formatted_results = {"AUC": [],
"Mechanism": []
}
mechanisms = list(results.keys())
for mechanism in mechanisms:
for auc in results[mechanism]["ROC-AUC Scores"]:
formatted_results["AUC"].append(auc)
formatted_results["Mechanism"].append(mechanism_name_map[mechanism])
results_df = pd.DataFrame(data=formatted_results)
_ = sns.boxplot(x="Mechanism", y="AUC", data=results_df, palette=mechanism_color_map)
plt.xticks(rotation=45)
plt.tight_layout()
figure_file = "figures/" + filename + ".pdf"
plt.savefig(figure_file, dpi=300)
plt.show()
plt.close()
def plot_auc_strategic(results, filename):
"""
Used with strategic agents to compare payments between strategic and truthful agents.
For each mechanism, generates boxplots of the AUC scores as the number of strategic agents varies.
Parameters
----------
results : dict.
{ strategy: { num: { mechanism: { "ROC-AUC Scores": [ score ] } } } },
where strategy is the name of the strategy (str), num is the number of strategic graders (int), mechanism is the name of a mechanism (str, one of the keys of the global mechanism_name_map), and each score in the list is a float.
filename : str.
Name of the file prefix used for saving the plots (as a .pdf).
Returns
-------
None.
"""
global mechanism_name_map
formatted_results = {"Number of Strategic Graders": [],
"AUC": [],
"Mechanism": [],
"Strategy": []
}
for strategy in results.keys():
for key in results[strategy].keys():
if int(key) in [10, 30, 50, 70, 90]:
mechanisms = list(results[strategy][key].keys())
for mechanism in mechanisms:
for s in results[strategy][key][mechanism]["ROC-AUC Scores"]:
formatted_results["Number of Strategic Graders"].append(key)
formatted_results["AUC"].append(s)
formatted_results["Mechanism"].append(mechanism_name_map[mechanism])
formatted_results["Strategy"].append(strategy)
results_df = pd.DataFrame(data=formatted_results)
for mechanism in mechanisms:
title = mechanism_name_map[mechanism]
mechanism_df = results_df.loc[results_df["Mechanism"] == title]
_ = sns.boxplot(x="Number of Strategic Graders", y="AUC", hue="Strategy", palette=strategy_color_map, data=mechanism_df)
plt.title(title)
plt.tight_layout()
figure_file = "figures/" + filename + "-" + mechanism + ".pdf"
plt.savefig(figure_file, dpi=300)
plt.show()
plt.close()
def plot_estimation_mses(results, filename):
"""
Used for true grade recovery tests with the various estimation procedures. Generates a boxplot of the MSE scores for each procedure.
Parameters
----------
results : dict.
{ procedure: { "MSE Scores": [ score ] } },
where procedure is the name of the procedure (str, one of the keys of the global estimation_procedure_map) and each score in the list is a float.
filename : str.
Name of the file used for saving the plot (as a .pdf).
Returns
-------
None.
"""
global estimation_procedure_map
formatted_results = {
"MSE": [],
"Estimation Method": []
}
mechanisms = list(results.keys())
for mechanism in mechanisms:
for m in results[mechanism]["MSE Scores"]:
formatted_results["MSE"].append(m)
formatted_results["Estimation Method"].append(estimation_procedure_map[mechanism])
results_df = pd.DataFrame(data=formatted_results)
_ = sns.boxplot(x="Estimation Method", y="MSE", palette=estimation_procedure_color_map, data=results_df)
plt.xticks(rotation=45)
plt.tight_layout()
figure_file = "figures/" + filename + ".pdf"
plt.savefig(figure_file, dpi=300)
plt.show()
plt.close()
def plot_kendall_tau(results, filename):
"""
Used for Continuous Effort setting. Generates a boxplot with the Kendall rank correlation coefficient (tau) scores of each mechanism.
Parameters
----------
results : dict.
{ mechanism: { "Tau Scores": [ score ] } },
where mechanism is the name of a mechanism (str, one of the keys of the global mechanism_name_map) and each score in the list is a float.
filename : str.
Name of the file used for saving the plot (as a .pdf).
Returns
-------
None.
"""
global mechanism_name_map
formatted_results = {"Tau": [],
"Mechanism": []
}
mechanisms = list(results.keys())
for mechanism in mechanisms:
for t in results[mechanism]["Tau Scores"]:
formatted_results["Tau"].append(t)
formatted_results["Mechanism"].append(mechanism_name_map[mechanism])
results_df = pd.DataFrame(data=formatted_results)
_ = sns.boxplot(x="Mechanism", y="Tau", data=results_df, palette=mechanism_color_map)
plt.xticks(rotation=45)
_.set_ylabel(r'$\tau_B$')
plt.tight_layout()
figure_file = "figures/" + filename + ".pdf"
plt.savefig(figure_file, dpi=300)
plt.show()
plt.close()
def plot_kendall_taus(results, filename):
"""
Used with strategic agents to compare mechanism performance as the number of strategic agents varies.
For each strategy, generates a boxplot with the tau scores of each mechanism as the number of strategic agents varies.
Parameters
----------
results : dict.
{ strategy: { num : { mechanism: { "Tau Scores": [ score ] } } } },
where strategy is the name of the strategy (str), num is the number of strategic graders (int), mechanism is the name of a mechanism (str, one of the keys of the global mechanism_name_map), and each score in the list is a float.
filename : str.
Name of the file prefix used for saving the plots (as a .pdf).
Returns
-------
None.
"""
global mechanism_name_map
mechanisms = []
strategies = []
formatted_results = {"Number of Strategic Graders": [],
"Tau": [],
"Mechanism": [],
"Strategy": []
}
strategies = list(results.keys())
for strategy in results.keys():
for key in results[strategy].keys():
if int(key) in [0, 20, 40, 60, 80, 100]:
mechanisms = list(results[strategy][key].keys())
for mechanism in mechanisms:
for t in results[strategy][key][mechanism]["Tau Scores"]:
formatted_results["Number of Strategic Graders"].append(key)
formatted_results["Tau"].append(t)
formatted_results["Mechanism"].append(mechanism_name_map[mechanism])
formatted_results["Strategy"].append(strategy)
results_df = pd.DataFrame(data=formatted_results)
for strategy in strategies:
title = strategy
strategy_df = results_df.loc[results_df["Strategy"] == title]
ax = sns.boxplot(x="Number of Strategic Graders", y="Tau", hue="Mechanism", palette=mechanism_color_map, data=strategy_df)
ax.set_ylabel(r'$\tau_B$')
plt.title(title)
plt.tight_layout()
figure_file = "figures/" + filename + "-" + strategy + ".pdf"
plt.savefig(figure_file, dpi=300)
plt.show()
plt.close()
def plot_kendall_tau_variances(results, filename):
"""
Used for Continuous Effort setting. Generates a boxplot with the variances of the Kendall rank correlation coefficient (tau) scores of each mechanism.
Parameters
----------
results : dict.
{ mechanism: { "Tau Variances": [ score ] } },
where mechanism is the name of a mechanism (str, one of the keys of the global mechanism_name_map) and each score in the list is a float.
filename : str.
Name of the file used for saving the plot (as a .pdf).
Returns
-------
None.
"""
global mechanism_name_map
formatted_results = {"Variance": [],
"Mechanism": []
}
mechanisms = list(results.keys())
for mechanism in mechanisms:
for v in results[mechanism]["Tau Variances"]:
formatted_results["Variance"].append(v)
formatted_results["Mechanism"].append(mechanism_name_map[mechanism])
results_df = | pd.DataFrame(data=formatted_results) | pandas.DataFrame |
# coding=utf-8
import math
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# ### Setup
tf.logging.set_verbosity(tf.logging.ERROR) # logging level
pd.options.display.max_rows = 10 # maximum number of rows to display
pd.options.display.float_format = '{:.1f}'.format
pd.set_option('display.max_columns', None) # maximum number of columns to display; None shows all columns
pd.set_option('display.width', 200) # display width (in characters)
pd.set_option('max_colwidth', 100) # column width, default is 50
pd.set_option('expand_frame_repr', False) # whether to wrap the frame; False disallows, True allows
california_housing_dataframe = pd.read_csv("Zcalifornia_housing_train.csv", sep=",") # load the dataset
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index)) # shuffle the data
california_housing_dataframe["median_house_value"] /= 1000.0 # scale median_house_value to units of thousands
print("california_housing_dataframe: ", california_housing_dataframe)
# ### Examine the data
print("california_housing_dataframe description: ", california_housing_dataframe.describe()) # summary statistics for each column
# ### Build the first model
# Step 1: define the feature and configure the feature column
my_feature = california_housing_dataframe[["total_rooms"]] # extract the total_rooms data from california_housing_dataframe
feature_columns = [tf.feature_column.numeric_column("total_rooms")] # define the feature column with numeric_column, treating its data as numeric
# Step 2: define the target
targets = california_housing_dataframe["median_house_value"]
# Step 3: configure the LinearRegressor
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.00005) # train the model with gradient descent
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0) # apply gradient clipping to the optimizer
linear_regressor = tf.estimator.LinearRegressor(
    feature_columns=feature_columns,
    optimizer=my_optimizer) # configure the linear_regressor
# Step 4: define the input function
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None): # define the input function
"""Trains a linear regression model of one feature.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
    features = {key: np.array(value) for key, value in dict(features).items()} # convert pandas feature data into a dict of NumPy arrays
    ds = Dataset.from_tensor_slices((features, targets)) # construct a Dataset object from the data
    ds = ds.batch(batch_size).repeat(num_epochs) # split the data into batches, repeated for the specified number of epochs
    if shuffle: # if shuffle is True, the data is shuffled so it reaches the model in random order during training
        ds = ds.shuffle(buffer_size=10000) # buffer_size specifies the size of the dataset that shuffle samples from
    features, labels = ds.make_one_shot_iterator().get_next() # build an iterator for the dataset and return the next batch of data to the LinearRegressor
return features, labels
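# Note (illustrative): the returned features tensor is a dict keyed by column name, e.g.
# {"total_rooms": <tf.Tensor of shape (batch_size,)>}, and labels is the matching batch of
# median_house_value targets; with batch_size=1 each iterator step yields a single example.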
# Step 5: train the model
_ = linear_regressor.train(
    input_fn=lambda: my_input_fn(my_feature, targets),
    steps=100) # call train() on linear_regressor to train the model
# Step 6: evaluate the model
prediction_input_fn = lambda: my_input_fn(my_feature, targets, num_epochs=1, shuffle=False)
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
predictions = np.array([item['predictions'][0] for item in predictions])
mean_squared_error = metrics.mean_squared_error(predictions, targets) # mean squared error (MSE)
root_mean_squared_error = math.sqrt(mean_squared_error) # root mean squared error (RMSE)
print("Mean Squared Error (on training data): %0.3f" % mean_squared_error)
print("Root Mean Squared Error (on training data): %0.3f" % root_mean_squared_error)
min_house_value = california_housing_dataframe["median_house_value"].min()
max_house_value = california_housing_dataframe["median_house_value"].max()
min_max_difference = max_house_value - min_house_value # compare RMSE with the difference between the target's max and min
print("Min. Median House Value: %0.3f" % min_house_value)
print("Max. Median House Value: %0.3f" % max_house_value)
print("Difference between Min. and Max.: %0.3f" % min_max_difference)
print("Root Mean Squared Error: %0.3f" % root_mean_squared_error)
# how well predictions agree with targets, based on overall summary statistics
calibration_data = | pd.DataFrame() | pandas.DataFrame |
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.encoders import MultiClassEncoder, WOEEncoder
ks.set_option("compute.default_index_type", "distributed-sequence")
@pytest.fixture
def data():
X = pd.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = pd.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
)
return obj, X, X_expected
@pytest.fixture
def data_float32():
X = pd.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = pd.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder(), dtype=np.float32).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
).astype(np.float32)
return obj, X, X_expected
@pytest.fixture
def data_no_cat():
X = pd.DataFrame(
np.zeros((3, 6)),
columns=list("qweasd"),
)
y = pd.Series([1, 2, 0], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
return obj, X, X.copy()
@pytest.fixture
def data_ks():
X = ks.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = ks.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
)
return obj, X, X_expected
@pytest.fixture
def data_float32_ks():
X = ks.DataFrame(
{
"A": ["Q", "Q", "Q", "W", "W", "W"],
"B": ["Q", "Q", "W", "W", "W", "W"],
"C": ["Q", "Q", "Q", "Q", "W", "W"],
"D": [1, 2, 3, 4, 5, 6],
}
)
y = ks.Series([0, 0, 1, 2, 1, 2], name="TARGET")
obj = MultiClassEncoder(WOEEncoder(), dtype=np.float32).fit(X, y)
X_expected = pd.DataFrame(
{
"D": {0: 1.0, 1: 2.0, 2: 3.0, 3: 4.0, 4: 5.0, 5: 6.0},
"A__TARGET_1_WOEEncoder": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0},
"B__TARGET_1_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_1_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"A__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.0,
3: 1.3862943611198906,
4: 1.3862943611198906,
5: 1.3862943611198906,
},
"B__TARGET_2_WOEEncoder": {
0: 0.0,
1: 0.0,
2: 0.6931471805599453,
3: 0.6931471805599453,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
"C__TARGET_2_WOEEncoder": {
0: -0.40546510810816444,
1: -0.40546510810816444,
2: -0.40546510810816444,
3: -0.40546510810816444,
4: 0.6931471805599453,
5: 0.6931471805599453,
},
}
).astype(np.float32)
return obj, X, X_expected
@pytest.fixture
def data_no_cat_ks():
X = ks.DataFrame(
np.zeros((3, 6)),
columns=list("qweasd"),
)
y = ks.Series([1, 2, 0], name="TARGET")
obj = MultiClassEncoder(WOEEncoder()).fit(X, y)
return obj, X, X.to_pandas().copy()
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
from datetime import datetime, timedelta
from ..utils import process_dataframe_and_series
import rich
from jsonpath import jsonpath
from retry import retry
import pandas as pd
import requests
import multitasking
import signal
from tqdm import tqdm
from typing import (Dict,
List,
Union)
from ..shared import session
from ..common import get_quote_history as get_quote_history_for_stock
from ..common import get_history_bill as get_history_bill_for_stock
from ..common import get_today_bill as get_today_bill_for_stock
from ..common import get_realtime_quotes_by_fs
from ..utils import (to_numeric,
get_quote_id)
from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS
from ..common.config import (
FS_DICT,
MARKET_NUMBER_DICT,
EASTMONEY_REQUEST_HEADERS,
EASTMONEY_QUOTE_FIELDS
)
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_base_info_single(stock_code: str) -> pd.Series:
"""
    Get basic information for a single stock
    Parameters
    ----------
    stock_code : str
        Stock code
    Returns
    -------
    Series
        Basic information for the single stock
"""
fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys())
params = (
('ut', 'fa5fd1943c7b386f172d6893dbfba10b'),
('invt', '2'),
('fltt', '2'),
('fields', fields),
('secid', get_quote_id(stock_code)),
)
url = 'http://push2.eastmoney.com/api/qt/stock/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
s = pd.Series(json_response['data']).rename(
index=EASTMONEY_STOCK_BASE_INFO_FIELDS)
return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()]
def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame:
"""
    Get basic information for multiple stocks
    Parameters
    ----------
    stock_codes : List[str]
        List of stock codes
    Returns
    -------
    DataFrame
        Basic information for the multiple stocks
"""
@multitasking.task
@retry(tries=3, delay=1)
def start(stock_code: str):
s = get_base_info_single(stock_code)
dfs.append(s)
pbar.update()
pbar.set_description(f'Processing => {stock_code}')
    dfs: List[pd.Series] = []
pbar = tqdm(total=len(stock_codes))
for stock_code in stock_codes:
start(stock_code)
multitasking.wait_for_tasks()
df = pd.DataFrame(dfs)
return df
@to_numeric
def get_base_info(stock_codes: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""
Parameters
----------
stock_codes : Union[str, List[str]]
        A stock code or a list of stock codes
Returns
-------
Union[Series, DataFrame]
        - ``Series`` : basic information for a single stock (when ``stock_codes`` is a string)
        - ``DataFrame`` : basic information for multiple stocks (when ``stock_codes`` is a list of strings)
Raises
------
TypeError
        When the type of ``stock_codes`` does not meet the requirements
Examples
--------
>>> import efinance as ef
    >>> # Get information for a single stock
>>> ef.stock.get_base_info('600519')
股票代码 600519
股票名称 贵州茅台
市盈率(动) 39.38
市净率 12.54
所处行业 酿酒行业
总市值 2198082348462.0
流通市值 2198082348462.0
板块编号 BK0477
ROE 8.29
净利率 54.1678
净利润 13954462085.610001
毛利率 91.6763
dtype: object
    >>> # Get information for multiple stocks
>>> ef.stock.get_base_info(['600519','300715'])
股票代码 股票名称 市盈率(动) 市净率 所处行业 总市值 流通市值 板块编号 ROE 净利率 净利润 毛利率
0 300715 凯伦股份 42.29 3.12 水泥建材 9.160864e+09 6.397043e+09 BK0424 3.97 12.1659 5.415488e+07 32.8765
1 600519 贵州茅台 39.38 12.54 酿酒行业 2.198082e+12 2.198082e+12 BK0477 8.29 54.1678 1.395446e+10 91.6763
"""
if isinstance(stock_codes, str):
return get_base_info_single(stock_codes)
elif hasattr(stock_codes, '__iter__'):
return get_base_info_muliti(stock_codes)
    raise TypeError(f'The given {stock_codes} does not meet the parameter requirements')
def get_quote_history(stock_codes: Union[str, List[str]],
beg: str = '19000101',
end: str = '20500101',
klt: int = 101,
fqt: int = 1) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
    Get K-line (candlestick) data for stocks
Parameters
----------
stock_codes : Union[str,List[str]]
        A stock code or name, or a list of stock codes or names
    beg : str, optional
        Start date, defaults to ``'19000101'``, i.e. January 1, 1900
    end : str, optional
        End date, defaults to ``'20500101'``, i.e. January 1, 2050
    klt : int, optional
        Time interval between quotes, defaults to ``101``; available options include
        - ``1`` : 1 minute
        - ``5`` : 5 minutes
        - ``15`` : 15 minutes
        - ``30`` : 30 minutes
        - ``60`` : 60 minutes
        - ``101`` : daily
        - ``102`` : weekly
        - ``103`` : monthly
    fqt : int, optional
        Price adjustment method, defaults to ``1``; available options include
        - ``0`` : no adjustment
        - ``1`` : forward-adjusted
        - ``2`` : backward-adjusted
Returns
-------
Union[DataFrame, Dict[str, DataFrame]]
股票的 K 线数据
- ``DataFrame`` : 当 ``stock_codes`` 是 ``str`` 时
- ``Dict[str, DataFrame]`` : 当 ``stock_codes`` 是 ``List[str]`` 时
Examples
--------
>>> import efinance as ef
>>> # 获取单只股票日 K 行情数据
>>> ef.stock.get_quote_history('600519')
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
>>> # 获取多只股票历史行情
>>> stock_df = ef.stock.get_quote_history(['600519','300750'])
>>> type(stock_df)
<class 'dict'>
>>> stock_df.keys()
dict_keys(['300750', '600519'])
>>> stock_df['600519']
股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率
0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83
1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13
2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45
3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72
4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25
... ... ... ... ... ... ... ... ... ... ... ... ... ...
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38
4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79
4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69
4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68
4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51
"""
df = get_quote_history_for_stock(
stock_codes,
beg=beg,
end=end,
klt=klt,
fqt=fqt
)
if isinstance(df, pd.DataFrame):
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
elif isinstance(df, dict):
for stock_code in df.keys():
df[stock_code].rename(columns={'代码': '股票代码',
'名称': '股票名称'
},
inplace=True)
return df
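# Illustrative usage sketch of get_quote_history (the helper name and the exact
# calls below are assumptions, not part of the original module): klt=102 with
# fqt=1 asks for weekly, forward-adjusted bars, and passing a list of codes
# switches the return type to a dict keyed by stock code.
def _demo_quote_history():
    weekly = get_quote_history('600519', beg='20200101', klt=102, fqt=1)
    many = get_quote_history(['600519', '300750'])
    return weekly.head(), many['600519'].head()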
@process_dataframe_and_series(remove_columns_and_indexes=['市场编号'])
@to_numeric
def get_realtime_quotes() -> pd.DataFrame:
"""
获取沪深市场最新行情总体情况
Returns
-------
DataFrame
沪深全市场A股上市公司的最新行情信息(涨跌幅、换手率等信息)
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_realtime_quotes()
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型
0 688787 N海天 277.59 139.48 172.39 139.25 171.66 102.54 85.62 - 78.93 74519 1110318832.0 36.94 5969744000 1213908667 1.688787 沪A
1 301045 N天禄 149.34 39.42 48.95 39.2 48.95 23.61 66.66 - 37.81 163061 683878656.0 15.81 4066344240 964237089 0.301045 深A
2 300532 今天国际 20.04 12.16 12.16 10.69 10.69 2.03 8.85 3.02 -22.72 144795 171535181.0 10.13 3322510580 1989333440 0.300532 深A
3 300600 国瑞科技 20.02 13.19 13.19 11.11 11.41 2.2 18.61 2.82 218.75 423779 541164432.0 10.99 3915421427 3003665117 0.300600 深A
4 300985 致远新能 20.01 47.08 47.08 36.8 39.4 7.85 66.65 2.17 58.37 210697 897370992.0 39.23 6277336472 1488300116 0.300985 深A
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4598 603186 华正新材 -10.0 43.27 44.09 43.27 43.99 -4.81 1.98 0.48 25.24 27697 120486294.0 48.08 6146300650 6063519472 1.603186 沪A
4599 688185 康希诺-U -10.11 476.4 534.94 460.13 530.0 -53.6 6.02 2.74 -2088.07 40239 1960540832.0 530.0 117885131884 31831479215 1.688185 沪A
4600 688148 芳源股份 -10.57 31.3 34.39 31.3 33.9 -3.7 26.07 0.56 220.01 188415 620632512.0 35.0 15923562000 2261706043 1.688148 沪A
4601 300034 钢研高纳 -10.96 43.12 46.81 42.88 46.5 -5.31 7.45 1.77 59.49 323226 1441101824.0 48.43 20959281094 18706911861 0.300034 深A
4602 300712 永福股份 -13.71 96.9 110.94 95.4 109.0 -15.4 6.96 1.26 511.21 126705 1265152928.0 112.3 17645877600 17645877600 0.300712 深A
"""
fs = FS_DICT['stock']
df = get_realtime_quotes_by_fs(fs)
df.rename(columns={'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_history_bill(stock_code: str) -> pd.DataFrame:
"""
获取单只股票历史单子流入流出数据
Parameters
----------
stock_code : str
股票代码
Returns
-------
DataFrame
沪深市场单只股票历史单子流入流出数据
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_history_bill('600519')
股票名称 股票代码 日期 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 主力净流入占比 小单流入净占比 中单流入净占比 大单流入净占比 超大单流入净占比 收盘价 涨跌幅
0 贵州茅台 600519 2021-03-04 -3.670272e+06 -2282056.0 5.952143e+06 1.461528e+09 -1.465199e+09 -0.03 -0.02 0.04 10.99 -11.02 2013.71 -5.05
1 贵州茅台 600519 2021-03-05 -1.514880e+07 -1319066.0 1.646793e+07 -2.528896e+07 1.014016e+07 -0.12 -0.01 0.13 -0.19 0.08 2040.82 1.35
2 贵州茅台 600519 2021-03-08 -8.001702e+08 -877074.0 8.010473e+08 5.670671e+08 -1.367237e+09 -6.29 -0.01 6.30 4.46 -10.75 1940.71 -4.91
3 贵州茅台 600519 2021-03-09 -2.237770e+08 -6391767.0 2.301686e+08 -1.795013e+08 -4.427571e+07 -1.39 -0.04 1.43 -1.11 -0.27 1917.70 -1.19
4 贵州茅台 600519 2021-03-10 -2.044173e+08 -1551798.0 2.059690e+08 -2.378506e+08 3.343331e+07 -2.02 -0.02 2.03 -2.35 0.33 1950.72 1.72
.. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
97 贵州茅台 600519 2021-07-26 -1.564233e+09 13142211.0 1.551091e+09 -1.270400e+08 -1.437193e+09 -8.74 0.07 8.67 -0.71 -8.03 1804.11 -5.05
98 贵州茅台 600519 2021-07-27 -7.803296e+08 -10424715.0 7.907544e+08 6.725104e+07 -8.475807e+08 -5.12 -0.07 5.19 0.44 -5.56 1712.89 -5.06
99 贵州茅台 600519 2021-07-28 3.997645e+08 2603511.0 -4.023677e+08 2.315648e+08 1.681997e+08 2.70 0.02 -2.72 1.57 1.14 1768.90 3.27
100 贵州茅台 600519 2021-07-29 -9.209842e+08 -2312235.0 9.232964e+08 -3.959741e+08 -5.250101e+08 -8.15 -0.02 8.17 -3.50 -4.65 1749.79 -1.08
101 贵州茅台 600519 2021-07-30 -1.524740e+09 -6020099.0 1.530761e+09 1.147248e+08 -1.639465e+09 -11.63 -0.05 11.68 0.88 -12.51 1678.99 -4.05
"""
df = get_history_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_today_bill(stock_code: str) -> pd.DataFrame:
"""
获取单只股票最新交易日的日内分钟级单子流入流出数据
Parameters
----------
stock_code : str
股票代码
Returns
-------
DataFrame
单只股票最新交易日的日内分钟级单子流入流出数据
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_today_bill('600519')
股票代码 时间 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入
0 600519 2021-07-29 09:31 -3261705.0 -389320.0 3651025.0 -12529658.0 9267953.0
1 600519 2021-07-29 09:32 6437999.0 -606994.0 -5831006.0 -42615994.0 49053993.0
2 600519 2021-07-29 09:33 13179707.0 -606994.0 -12572715.0 -85059118.0 98238825.0
3 600519 2021-07-29 09:34 15385244.0 -970615.0 -14414632.0 -86865209.0 102250453.0
4 600519 2021-07-29 09:35 7853716.0 -970615.0 -6883104.0 -75692436.0 83546152.0
.. ... ... ... ... ... ... ...
235 600519 2021-07-29 14:56 -918956019.0 -1299630.0 920255661.0 -397127393.0 -521828626.0
236 600519 2021-07-29 14:57 -920977761.0 -2319213.0 923296987.0 -397014702.0 -523963059.0
237 600519 2021-07-29 14:58 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
238 600519 2021-07-29 14:59 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
239 600519 2021-07-29 15:00 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0
"""
df = get_today_bill_for_stock(stock_code)
df.rename(columns={
'代码': '股票代码',
'名称': '股票名称'
}, inplace=True)
return df
@to_numeric
def get_latest_quote(stock_codes: List[str]) -> pd.DataFrame:
"""
获取沪深市场多只股票的实时涨幅情况
Parameters
----------
stock_codes : List[str]
多只股票代码列表
Returns
-------
DataFrame
沪深市场、港股、美股多只股票的实时涨幅情况
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_quote(['600519','300750'])
股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 市场类型
0 600519 贵州茅台 0.59 1700.04 1713.0 1679.0 1690.0 10.04 0.30 0.72 43.31 37905 6.418413e+09 1690.0 2135586507912 2135586507912 沪A
1 300750 宁德时代 0.01 502.05 529.9 480.0 480.0 0.05 1.37 1.75 149.57 277258 1.408545e+10 502.0 1169278366994 1019031580505 深A
    Notes
    -----
    When you need real-time quotes for many Shanghai/Shenzhen A-shares at once, prefer ``efinance.stock.get_realtime_quotes``
"""
if isinstance(stock_codes, str):
stock_codes = [stock_codes]
secids: List[str] = [get_quote_id(stock_code)
for stock_code in stock_codes]
columns = EASTMONEY_QUOTE_FIELDS
fields = ",".join(columns.keys())
params = (
('OSVersion', '14.3'),
('appVersion', '6.3.8'),
('fields', fields),
('fltt', '2'),
('plat', 'Iphone'),
('product', 'EFund'),
('secids', ",".join(secids)),
('serverVersion', '6.3.6'),
('version', '6.3.8'),
)
url = 'https://push2.eastmoney.com/api/qt/ulist.np/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
rows = jsonpath(json_response, '$..diff[:]')
if rows is None:
        return pd.DataFrame(columns=columns.values()).rename(columns={
            '市场编号': '市场类型'
        })
df = pd.DataFrame(rows)[columns.keys()].rename(columns=columns)
df['市场类型'] = df['市场编号'].apply(lambda x: MARKET_NUMBER_DICT.get(str(x)))
del df['市场编号']
return df
@to_numeric
def get_top10_stock_holder_info(stock_code: str,
top: int = 4) -> pd.DataFrame:
"""
获取沪深市场指定股票前十大股东信息
Parameters
----------
stock_code : str
股票代码
top : int, optional
最新 top 个前 10 大流通股东公开信息, 默认为 ``4``
Returns
-------
DataFrame
个股持仓占比前 10 的股东的一些信息
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_top10_stock_holder_info('600519',top = 1)
股票代码 更新日期 股东代码 股东名称 持股数 持股比例 增减 变动率
0 600519 2021-03-31 80010298 中国贵州茅台酒厂(集团)有限责任公司 6.783亿 54.00% 不变 --
1 600519 2021-03-31 80637337 香港中央结算有限公司 9594万 7.64% -841.1万 -8.06%
2 600519 2021-03-31 80732941 贵州省国有资本运营有限责任公司 5700万 4.54% -182.7万 -3.11%
3 600519 2021-03-31 80010302 贵州茅台酒厂集团技术开发公司 2781万 2.21% 不变 --
4 600519 2021-03-31 80475097 中央汇金资产管理有限责任公司 1079万 0.86% 不变 --
5 600519 2021-03-31 80188285 中国证券金融股份有限公司 803.9万 0.64% -91 0.00%
6 600519 2021-03-31 78043999 深圳市金汇荣盛财富管理有限公司-金汇荣盛三号私募证券投资基金 502.1万 0.40% 不变 --
7 600519 2021-03-31 70400207 中国人寿保险股份有限公司-传统-普通保险产品-005L-CT001沪 434.1万 0.35% 44.72万 11.48%
8 600519 2021-03-31 005827 中国银行股份有限公司-易方达蓝筹精选混合型证券投资基金 432万 0.34% 新进 --
9 600519 2021-03-31 78083830 珠海市瑞丰汇邦资产管理有限公司-瑞丰汇邦三号私募证券投资基金 416.1万 0.33% 不变 --
"""
def gen_fc(stock_code: str) -> str:
"""
Parameters
----------
stock_code : str
股票代码
Returns
-------
str
指定格式的字符串
"""
_type, stock_code = get_quote_id(stock_code).split('.')
_type = int(_type)
        # Shenzhen market
        if _type == 0:
            return f'{stock_code}02'
        # Shanghai market
        return f'{stock_code}01'
def get_public_dates(stock_code: str) -> List[str]:
"""
获取指定股票公开股东信息的日期
Parameters
----------
stock_code : str
股票代码
Returns
-------
List[str]
公开日期列表
"""
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
data = {"fc": fc}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetFirstRequest2Data'
json_response = requests.post(
url, json=data).json()
dates = jsonpath(json_response, f'$..BaoGaoQi')
if not dates:
return []
return dates
fields = {
'GuDongDaiMa': '股东代码',
'GuDongMingCheng': '股东名称',
'ChiGuShu': '持股数',
'ChiGuBiLi': '持股比例',
'ZengJian': '增减',
'BianDongBiLi': '变动率',
}
quote_id = get_quote_id(stock_code)
stock_code = quote_id.split('.')[-1]
fc = gen_fc(stock_code)
dates = get_public_dates(stock_code)
dfs: List[pd.DataFrame] = []
empty_df = pd.DataFrame(columns=['股票代码', '日期']+list(fields.values()))
for date in dates[:top]:
data = {"fc": fc, "BaoGaoQi": date}
url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetShiDaLiuTongGuDong'
response = requests.post(url, json=data)
response.encoding = 'utf-8'
items: List[dict] = jsonpath(
response.json(), f'$..ShiDaLiuTongGuDongList[:]')
if not items:
continue
df = pd.DataFrame(items)
df.rename(columns=fields, inplace=True)
df.insert(0, '股票代码', [stock_code for _ in range(len(df))])
df.insert(1, '更新日期', [date for _ in range(len(df))])
del df['IsLink']
dfs.append(df)
if len(dfs) == 0:
return empty_df
return pd.concat(dfs, axis=0, ignore_index=True)
def get_all_report_dates() -> pd.DataFrame:
"""
获取沪深市场的全部股票报告期信息
Returns
-------
DataFrame
沪深市场的全部股票报告期信息
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_all_report_dates()
报告日期 季报名称
0 2021-06-30 2021年 半年报
1 2021-03-31 2021年 一季报
2 2020-12-31 2020年 年报
3 2020-09-30 2020年 三季报
4 2020-06-30 2020年 半年报
5 2020-03-31 2020年 一季报
6 2019-12-31 2019年 年报
7 2019-09-30 2019年 三季报
8 2019-06-30 2019年 半年报
9 2019-03-31 2019年 一季报
10 2018-12-31 2018年 年报
11 2018-09-30 2018年 三季报
12 2018-06-30 2018年 半年报
13 2018-03-31 2018年 一季报
14 2017-12-31 2017年 年报
15 2017-09-30 2017年 三季报
16 2017-06-30 2017年 半年报
17 2017-03-31 2017年 一季报
18 2016-12-31 2016年 年报
19 2016-09-30 2016年 三季报
20 2016-06-30 2016年 半年报
21 2016-03-31 2016年 一季报
22 2015-12-31 2015年 年报
24 2015-06-30 2015年 半年报
25 2015-03-31 2015年 一季报
26 2014-12-31 2014年 年报
27 2014-09-30 2014年 三季报
28 2014-06-30 2014年 半年报
29 2014-03-31 2014年 一季报
30 2013-12-31 2013年 年报
31 2013-09-30 2013年 三季报
32 2013-06-30 2013年 半年报
33 2013-03-31 2013年 一季报
34 2012-12-31 2012年 年报
35 2012-09-30 2012年 三季报
36 2012-06-30 2012年 半年报
37 2012-03-31 2012年 一季报
38 2011-12-31 2011年 年报
39 2011-09-30 2011年 三季报
"""
fields = {
'REPORT_DATE': '报告日期',
'DATATYPE': '季报名称'
}
params = (
('type', 'RPT_LICO_FN_CPD_BBBQ'),
('sty', ','.join(fields.keys())),
('p', '1'),
('ps', '2000'),
)
url = 'https://datacenter.eastmoney.com/securities/api/data/get'
response = requests.get(
url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
    if not items:
        # return an empty frame rather than falling through with no value
        return pd.DataFrame(columns=fields.values())
df = pd.DataFrame(items)
df = df.rename(columns=fields)
df['报告日期'] = df['报告日期'].apply(lambda x: x.split()[0])
return df
@to_numeric
def get_all_company_performance(date: str = None) -> pd.DataFrame:
"""
获取沪深市场股票某一季度的表现情况
Parameters
----------
date : str, optional
报告发布日期 部分可选示例如下(默认为 ``None``)
- ``None`` : 最新季报
- ``'2021-06-30'`` : 2021 年 Q2 季度报
- ``'2021-03-31'`` : 2021 年 Q1 季度报
Returns
-------
DataFrame
获取沪深市场股票某一季度的表现情况
Examples
---------
>>> import efinance as ef
>>> # 获取最新季度业绩表现
>>> ef.stock.get_all_company_performance()
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 688981 中芯国际 2021-08-28 00:00:00 1.609039e+10 22.253453 20.6593 5.241321e+09 278.100000 307.8042 0.6600 11.949525 5.20 26.665642 1.182556
1 688819 天能股份 2021-08-28 00:00:00 1.625468e+10 9.343279 23.9092 6.719446e+08 -14.890000 -36.8779 0.7100 11.902912 6.15 17.323263 -1.562187
2 688789 宏华数科 2021-08-28 00:00:00 4.555604e+08 56.418441 6.5505 1.076986e+08 49.360000 -7.3013 1.8900 14.926761 13.51 43.011243 1.421272
3 688681 科汇股份 2021-08-28 00:00:00 1.503343e+08 17.706987 121.9407 1.664509e+07 -13.100000 383.3331 0.2100 5.232517 4.84 47.455511 -0.232395
4 688670 金迪克 2021-08-28 00:00:00 3.209423e+07 -63.282413 -93.1788 -2.330505e+07 -242.275001 -240.1554 -0.3500 3.332254 -10.10 85.308531 1.050348
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3720 600131 国网信通 2021-07-16 00:00:00 2.880378e+09 6.787087 69.5794 2.171389e+08 29.570000 296.2051 0.1800 4.063260 4.57 19.137437 -0.798689
3721 600644 乐山电力 2021-07-15 00:00:00 1.257030e+09 18.079648 5.7300 8.379727e+07 -14.300000 25.0007 0.1556 3.112413 5.13 23.645137 0.200906
3722 002261 拓维信息 2021-07-15 00:00:00 8.901777e+08 47.505282 24.0732 6.071063e+07 68.320000 30.0596 0.0550 2.351598 2.37 37.047968 -0.131873
3723 601952 苏垦农发 2021-07-13 00:00:00 4.544138e+09 11.754570 47.8758 3.288132e+08 1.460000 83.1486 0.2400 3.888046 6.05 15.491684 -0.173772
3724 601568 北元集团 2021-07-09 00:00:00 6.031506e+09 32.543303 30.6352 1.167989e+09 61.050000 40.8165 0.3200 3.541533 9.01 27.879243 0.389860
>>> # 获取指定日期的季度业绩表现
>>> ef.stock.get_all_company_performance('2020-03-31')
股票代码 股票简称 公告日期 营业收入 营业收入同比增长 营业收入季度环比 净利润 净利润同比增长 净利润季度环比 每股收益 每股净资产 净资产收益率 销售毛利率 每股经营现金流量
0 605033 美邦股份 2021-08-25 00:00:00 2.178208e+08 NaN NaN 4.319814e+07 NaN NaN 0.4300 NaN NaN 37.250416 NaN
1 301048 金鹰重工 2021-07-30 00:00:00 9.165528e+07 NaN NaN -2.189989e+07 NaN NaN NaN NaN -1.91 20.227118 NaN
2 001213 中铁特货 2021-07-29 00:00:00 1.343454e+09 NaN NaN -3.753634e+07 NaN NaN -0.0100 NaN NaN -1.400708 NaN
3 605588 冠石科技 2021-07-28 00:00:00 1.960175e+08 NaN NaN 1.906751e+07 NaN NaN 0.3500 NaN NaN 16.324650 NaN
4 688798 艾为电子 2021-07-27 00:00:00 2.469943e+08 NaN NaN 2.707568e+07 NaN NaN 0.3300 NaN 8.16 33.641934 NaN
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
4440 603186 华正新材 2020-04-09 00:00:00 4.117502e+08 -6.844813 -23.2633 1.763252e+07 18.870055 -26.3345 0.1400 5.878423 2.35 18.861255 0.094249
4441 002838 道恩股份 2020-04-09 00:00:00 6.191659e+08 -8.019810 -16.5445 6.939886e+07 91.601624 76.7419 0.1700 2.840665 6.20 22.575224 0.186421
4442 600396 金山股份 2020-04-08 00:00:00 2.023133e+09 0.518504 -3.0629 1.878432e+08 114.304022 61.2733 0.1275 1.511012 8.81 21.422393 0.085698
4443 002913 奥士康 2020-04-08 00:00:00 4.898977e+08 -3.883035 -23.2268 2.524717e+07 -47.239162 -58.8136 0.1700 16.666749 1.03 22.470020 0.552624
4444 002007 华兰生物 2020-04-08 00:00:00 6.775414e+08 -2.622289 -36.1714 2.472864e+08 -4.708821 -22.6345 0.1354 4.842456 3.71 61.408522 0.068341
    Notes
    -----
    If the supplied date is invalid, the list of valid dates is printed.
    The valid dates can also be obtained via ``efinance.stock.get_all_report_dates``
"""
    # TODO: speed this up
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票简称',
'NOTICE_DATE': '公告日期',
'TOTAL_OPERATE_INCOME': '营业收入',
'YSTZ': '营业收入同比增长',
'YSHZ': '营业收入季度环比',
'PARENT_NETPROFIT': '净利润',
'SJLTZ': '净利润同比增长',
'SJLHZ': '净利润季度环比',
'BASIC_EPS': '每股收益',
'BPS': '每股净资产',
'WEIGHTAVG_ROE': '净资产收益率',
'XSMLL': '销售毛利率',
'MGJYXJJE': '每股经营现金流量'
# 'ISNEW':'是否最新'
}
dates = get_all_report_dates()['报告日期'].to_list()
if date is None:
date = dates[0]
if date not in dates:
rich.print('日期输入有误,可选日期如下:')
rich.print(dates)
return pd.DataFrame(columns=fields.values())
date = f"(REPORTDATE=\'{date}\')"
page = 1
dfs: List[pd.DataFrame] = []
while 1:
params = (
('st', 'NOTICE_DATE,SECURITY_CODE'),
('sr', '-1,-1'),
('ps', '500'),
('p', f'{page}'),
('type', 'RPT_LICO_FN_CPD'),
('sty', 'ALL'),
('token', '<KEY>'),
            # ! restrict to Shanghai/Shenzhen A-shares
('filter',
f'(SECURITY_TYPE_CODE in ("058001001","058001008")){date}'),
)
url = 'http://datacenter-web.eastmoney.com/api/data/get'
response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
dfs.append(df)
page += 1
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
df = pd.concat(dfs, axis=0, ignore_index=True)
df = df.rename(columns=fields)[fields.values()]
return df
@to_numeric
def get_latest_holder_number() -> pd.DataFrame:
"""
获取沪深A股市场最新公开的股东数目变化情况
Returns
-------
DataFrame
沪深A股市场最新公开的股东数目变化情况
Examples
--------
>>> import efinance as ef
>>> ef.stock.get_latest_holder_number()
股票代码 股票名称 股东人数 股东人数增减 较上期变化百分比 股东户数统计截止日 户均持股市值 户均持股数量 总市值 总股本 公告日期
0 688981 中芯国际 347706 -3.459784 -12461.0 2021-06-30 00:00:00 3.446469e+05 5575.005896 1.198358e+11 1938463000 2021-08-28 00:00:00
1 688819 天能股份 36749 -11.319981 -4691.0 2021-06-30 00:00:00 1.176868e+06 26452.420474 4.324873e+10 972100000 2021-08-28 00:00:00
2 688575 亚辉龙 7347 -74.389989 -21341.0 2021-06-30 00:00:00 2.447530e+06 55124.540629 1.798200e+10 405000000 2021-08-28 00:00:00
3 688538 和辉光电 383993 -70.245095 -906527.0 2021-06-30 00:00:00 1.370180e+05 35962.732719 5.261396e+10 13809437625 2021-08-28 00:00:00
4 688425 铁建重工 311356 -64.684452 -570284.0 2021-06-30 00:00:00 1.010458e+05 16510.746541 3.146121e+10 5140720000 2021-08-28 00:00:00
.. ... ... ... ... ... ... ... ... ... ... ...
400 600618 氯碱化工 45372 -0.756814 -346.0 2014-06-30 00:00:00 1.227918e+05 16526.491581 5.571311e+09 749839976 2014-08-22 00:00:00
401 601880 辽港股份 89923 -3.589540 -3348.0 2014-03-31 00:00:00 9.051553e+04 37403.111551 8.139428e+09 3363400000 2014-04-30 00:00:00
402 600685 中船防务 52296 -4.807325 -2641.0 2014-03-11 00:00:00 1.315491e+05 8384.263691 6.879492e+09 438463454 2014-03-18 00:00:00
403 000017 深中华A 21358 -10.800200 -2586.0 2013-06-30 00:00:00 5.943993e+04 14186.140556 1.269518e+09 302987590 2013-08-24 00:00:00
404 601992 金隅集团 66736 -12.690355 -9700.0 2013-06-30 00:00:00 2.333339e+05 46666.785918 1.557177e+10 3114354625 2013-08-22 00:00:00
"""
dfs: List[pd.DataFrame] = []
page = 1
fields = {
'SECURITY_CODE': '股票代码',
'SECURITY_NAME_ABBR': '股票名称',
'HOLDER_NUM': '股东人数',
'HOLDER_NUM_RATIO': '股东人数增减',
'HOLDER_NUM_CHANGE': '较上期变化百分比',
'END_DATE': '股东户数统计截止日',
'AVG_MARKET_CAP': '户均持股市值',
'AVG_HOLD_NUM': '户均持股数量',
'TOTAL_MARKET_CAP': '总市值',
'TOTAL_A_SHARES': '总股本',
'HOLD_NOTICE_DATE': '公告日期'
}
while 1:
params = (
('sortColumns', 'HOLD_NOTICE_DATE,SECURITY_CODE'),
('sortTypes', '-1,-1'),
('pageSize', '500'),
('pageNumber', page),
('reportName', 'RPT_HOLDERNUMLATEST'),
('columns', 'SECURITY_CODE,SECURITY_NAME_ABBR,END_DATE,INTERVAL_CHRATE,AVG_MARKET_CAP,AVG_HOLD_NUM,TOTAL_MARKET_CAP,TOTAL_A_SHARES,HOLD_NOTICE_DATE,HOLDER_NUM,PRE_HOLDER_NUM,HOLDER_NUM_CHANGE,HOLDER_NUM_RATIO,END_DATE,PRE_END_DATE'),
('quoteColumns', 'f2,f3'),
('source', 'WEB'),
('client', 'WEB'),
            #! restrict to Shanghai/Shenzhen A-shares
('filter',
f'(SECURITY_TYPE_CODE in ("058001001","058001008"))'),
)
response = session.get('http://datacenter-web.eastmoney.com/api/data/v1/get',
headers=EASTMONEY_REQUEST_HEADERS,
params=params)
items = jsonpath(response.json(), '$..data[:]')
if not items:
break
df = pd.DataFrame(items)
df = df.rename(columns=fields)[fields.values()]
page += 1
dfs.append(df)
if len(dfs) == 0:
df = pd.DataFrame(columns=fields.values())
return df
    df = pd.concat(dfs)
    return df
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
# fillna returns a new Series, so the result has to be assigned back
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinc
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
    df1 = pd.merge(df_2010, df_2011, on='siteid')
    df2 = pd.merge(df1, df_2012, on='siteid')
    df3 = pd.merge(df2, df_2013, on='siteid')
    df4 = pd.merge(df3, df_2014, on='siteid')
    df5 = pd.merge(df4, df_2015, on='siteid')
    df6 = pd.merge(df5, df_2016, on='siteid')
    df7 = pd.merge(df6, df_2017, on='siteid')
    df8 = pd.merge(df7, df_2018, on='siteid')
    df_sum_all_Years = pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
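# Minimal sketch of the per-year counting/merge pattern used by the groupby_*
# functions above, run on a toy frame (the helper name _demo_yearly_counts is
# illustrative and not part of the original script).
def _demo_yearly_counts():
    toy = pd.DataFrame({'siteid': [1, 1, 2], 'surgyear': [2010, 2011, 2010]})
    c2010 = toy.groupby('siteid')['surgyear'].apply(
        lambda x: (x == 2010).sum()).reset_index(name='2010')
    c2011 = toy.groupby('siteid')['surgyear'].apply(
        lambda x: (x == 2011).sum()).reset_index(name='2011')
    return pd.merge(c2010, c2011, on='siteid')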
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
    df1 = pd.merge(df_2010, df_2011, on='surgid')
    df2 = pd.merge(df1, df_2012, on='surgid')
    df3 = pd.merge(df2, df_2013, on='surgid')
    df4 = pd.merge(df3, df_2014, on='surgid')
    df5 = pd.merge(df4, df_2015, on='surgid')
    df6 = pd.merge(df5, df_2016, on='surgid')
    df7 = pd.merge(df6, df_2017, on='surgid')
    df8 = pd.merge(df7, df_2018, on='surgid')
    df_sum_all_Years = pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
    df1 = pd.merge(df_2010, df_2011, on='hospid')
    df2 = pd.merge(df1, df_2012, on='hospid')
    df3 = pd.merge(df2, df_2013, on='hospid')
    df4 = pd.merge(df3, df_2014, on='hospid')
    df5 = pd.merge(df4, df_2015, on='hospid')
    df6 = pd.merge(df5, df_2016, on='hospid')
    df7 = pd.merge(df6, df_2017, on='hospid')
    df8 = pd.merge(df7, df_2018, on='hospid')
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert o is None
else:
assert_series_equal(o, e)
def test_resample():
index = pd.date_range(start='20190101', freq='15min', periods=5)
arg = pd.Series([1, 0, 0, 0, 2], index=index)
idx_exp = pd.date_range(start='20190101', freq='1h', periods=2)
expected = pd.Series([0.25, 2.], index=idx_exp)
out = forecast.resample(arg)
assert_series_equal(out, expected)
assert forecast.resample(None) is None
@pytest.fixture
def rfs_series():
return pd.Series([1, 2],
index=pd.DatetimeIndex(['20190101 01', '20190101 02']))
@pytest.mark.parametrize(
'start,end,start_slice,end_slice,fill_method,exp_val,exp_idx', [
(None, None, None, None, 'interpolate', [1, 1.5, 2],
['20190101 01', '20190101 0130', '20190101 02']),
('20190101', '20190101 0230', None, None, 'interpolate',
[1, 1, 1, 1.5, 2, 2],
['20190101', '20190101 0030', '20190101 01', '20190101 0130',
'20190101 02', '20190101 0230']),
('20190101', '20190101 02', '20190101 0030', '20190101 0130', 'bfill',
[1., 1, 2], ['20190101 0030', '20190101 01', '20190101 0130'])
]
)
def test_reindex_fill_slice(rfs_series, start, end, start_slice, end_slice,
fill_method, exp_val, exp_idx):
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_some_nan():
rfs_series = pd.Series([1, 2, None, 4], index=pd.DatetimeIndex([
'20190101 01', '20190101 02', '20190101 03', '20190101 04',
]))
start, end, start_slice, end_slice, fill_method = \
None, None, None, None, 'interpolate'
exp_val = [1, 1.5, 2, 2.5, 3, 3.5, 4]
exp_idx = [
'20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03', '20190101 0330', '20190101 04']
exp = pd.Series(exp_val, index=pd.DatetimeIndex(exp_idx))
out = forecast.reindex_fill_slice(
rfs_series, freq='30min', start=start, end=end,
start_slice=start_slice, end_slice=end_slice, fill_method=fill_method)
assert_series_equal(out, exp)
def test_reindex_fill_slice_all_nan():
arg = pd.Series([None]*3, index=pd.DatetimeIndex(
['20190101 01', '20190101 02', '20190101 03']))
out = forecast.reindex_fill_slice(arg, freq='30min')
exp = pd.Series([None]*5, index=pd.DatetimeIndex(
['20190101 01', '20190101 0130', '20190101 02', '20190101 0230',
'20190101 03']))
assert_series_equal(out, exp)
def test_reindex_fill_slice_empty():
out = forecast.reindex_fill_slice(pd.Series(dtype=float), freq='30min')
assert_series_equal(out, pd.Series(dtype=float))
def test_reindex_fill_slice_none():
out = forecast.reindex_fill_slice(None, freq='30min')
assert out is None
def test_cloud_cover_to_ghi_linear():
cloud_cover = pd.Series([0, 50, 100.])
ghi_clear = pd.Series([1000, 1000, 1000.])
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear)
expected = pd.Series([1000, 675, 350.])
assert_series_equal(out, expected)
out = forecast.cloud_cover_to_ghi_linear(cloud_cover, ghi_clear, offset=20)
expected = pd.Series([1000, 600, 200.])
assert_series_equal(out, expected)
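# The expected values above are consistent with a linear cloud-cover-to-GHI
# mapping of the form below (stated here as an assumption for illustration,
# with the default offset inferred to be 35 from the first case):
#     ghi = ghi_clear * (offset + (100 - offset) * (1 - cloud_cover / 100)) / 100
# e.g. cloud_cover=50, ghi_clear=1000, offset=35 -> (35 + 65 * 0.5) * 10 = 675.
def _linear_cloud_cover_check(cloud_cover, ghi_clear, offset=35.0):
    return ghi_clear * (offset + (100 - offset) * (1 - cloud_cover / 100)) / 100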
@pytest.mark.xfail(raises=AssertionError, strict=True)
def test_cloud_cover_to_irradiance_ghi_clear():
index = pd.date_range(start='20190101', periods=3, freq='1h')
cloud_cover = pd.Series([0, 50, 100.], index=index)
ghi_clear = pd.Series([10, 10, 1000.], index=index)
zenith = pd.Series([90.0, 89.9, 45], index=index)
out = forecast.cloud_cover_to_irradiance_ghi_clear(
cloud_cover, ghi_clear, zenith
)
# https://github.com/pvlib/pvlib-python/issues/681
    ghi_exp = pd.Series([10., 6.75, 350.])
    assert_series_equal(out, ghi_exp)
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
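# Standalone sketch of the round-trip these tests exercise (not part of the
# original suite); it relies only on to_msgpack/read_msgpack as imported above,
# where calling to_msgpack with no path returns the packed bytes directly.
def _demo_msgpack_roundtrip():
    df = DataFrame({'a': [1, 2, 3]})
    packed = df.to_msgpack()      # bytes, since no path is given
    restored = read_msgpack(packed)
    assert_frame_equal(df, restored)
    return restored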
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
        # currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
self.assertRaises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
with warnings.catch_warnings(record=True):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pydata/pandas/pull/9783
"""
def setUp(self):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
self.assertTrue(block.values.flags.writeable)
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(self, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with patch(compress_module, 'decompress', decompress), \
tm.assert_produces_warning(PerformanceWarning) as ws:
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
self.assertTrue(block.values.flags.writeable)
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
self.assertEqual(
str(w.message),
'copying data after decompressing; this may mean that'
' decompress is caching its result',
)
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
self.assertEqual(buf, control_buf)
def test_compression_warns_when_decompress_caches_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_compression_warns_when_decompress_caches('zlib')
def test_compression_warns_when_decompress_caches_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_compression_warns_when_decompress_caches('blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
self.assertTrue(empty_unpacked.flags.writeable)
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
self.assertTrue(char_unpacked.flags.writeable)
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
        # we compare the ord of bytes b'a' with unicode u'a' because they should
        # always be the same (unless we were able to mutate the shared
        # character singleton, in which case ord(b'a') == ord(b'b')).
self.assertEqual(ord(b'a'), ord(u'a'))
tm.assert_numpy_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
if not self._SQLALCHEMY_INSTALLED:
raise nose.SkipTest('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
if not self._SQLALCHEMY_INSTALLED:
raise nose.SkipTest('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setUp(self):
super(TestEncoding, self).setUp()
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
        for frame in compat.itervalues(self.frame):
            result = self.encode_decode(frame)
            assert_frame_equal(result, frame)
from __future__ import print_function
from __future__ import division
import pandas as pd
import numpy as np
import click
import glob
import os
import sys
READ_CUTOFF = 2
SAMPLE_CUTOFF = 1
CIRC_HEADER = [
'chrom',
'start',
'end',
'name',
'score',
'strand',
'thickStart',
'thickEnd',
'itemRgb',
'exonCount',
'exonSizes',
'exonOffsets',
'readNumber',
'circType',
'geneName',
'isoformName',
'index',
'flankIntron'
]
OUT_COL = [
'circ_name',
'chrom',
'start',
'end',
'strand',
'exonCount',
'length',
'flankIntron',
'flankIntronSizeUP',
'flankIntronSizeDOWN',
'circType',
'isoformName',
'geneName',
'gene_name',
'transcript_biotype',
'sample_id',
'readNumber'
]
OUT_COL_NAME = [
'circRNAID',
'chrom',
'start',
'end',
'strand',
'exonCount',
'length',
'flankIntron',
'flankIntronSizeUP',
'flankIntronSizeDOWN',
'circType',
'isoformName',
'geneID',
'geneSymbol',
'transcriptBiotype',
'sampleID',
'readNumber'
]
SP_EN_NAME = {
'mus_musculus': 'Mouse',
'canis_familiaris': 'Dog',
'cavia_porcellus': 'Guinea_pig',
'felis_catus': 'Cat',
'macaca_mulatta': 'Monkey',
'oryctolagus_cuniculus': 'Rabbit',
'rattus_norvegicus': 'Rat',
'ovis_aries': 'Sheep',
'gallus_gallus': 'Chicken',
'sus_scrofa': 'Pig',
'wheat': 'wheat'
}
def read_circ_table(circ_dir, suffix='circularRNA_known.txt'):
circ_tables = glob.glob('{d}/*{s}'.format(d=circ_dir, s=suffix))
circ_df_list = list()
for each_file in circ_tables:
each_df = pd.read_table(each_file, header=None, names=CIRC_HEADER)
sp_name = os.path.basename(each_file).split('.')[0]
each_df.loc[:, 'sample_id'] = sp_name
circ_df_list.append(each_df)
circ_df = pd.concat(circ_df_list)
return circ_df
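# Usage sketch for read_circ_table (illustrative only; the directory path and
# helper name are assumptions): it expects one CIRCexplorer-style
# "<sample>.circularRNA_known.txt" file per sample inside circ_dir.
def _demo_read_circ_table(circ_dir='/path/to/circ_dir'):
    circ_df = read_circ_table(circ_dir)
    return circ_df[['chrom', 'start', 'end', 'circType', 'sample_id']].head()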
def get_circ_stat(stats_df, col_name, method='mean'):
all_num = stats_df.loc[:, col_name].apply(method)
by_tr_type = stats_df.groupby(
['transcript_biotype']).agg({col_name: method})
by_tr_type.loc['Total', col_name] = all_num
return by_tr_type
def get_circ_num(stats_df):
all_num = pd.Series(len(stats_df), index=['Total'])
by_tr_type = stats_df.transcript_biotype.value_counts()
# circ_gene_type_num = stats_df.groupby(['gene_biotype', 'circType']).size()
# circ_gene_type_num.name = 'circ_count'
# circ_gene_type_num = circ_gene_type_num.reset_index()
# circ_gene_type_num.loc[:, 'name'] = circ_gene_type_num.circType + \
# '-' + circ_gene_type_num.gene_biotype
# by_gene_circ_type = pd.Series(
# circ_gene_type_num.circ_count.values, index=circ_gene_type_num.name)
circ_num_stats = pd.concat(
[all_num, by_tr_type]
)
circ_num_stats.name = 'number'
return circ_num_stats
def get_host_gene_num(stats_df):
all_num = pd.Series(len(stats_df.geneName.unique()), index=['Total'])
ids_by_circ_type = stats_df.groupby('transcript_biotype')[
'geneName'].unique()
by_tr_type = pd.Series(map(len, ids_by_circ_type),
index=ids_by_circ_type.index)
# circ_gene_type_num = stats_df.groupby(['gene_biotype', 'circType'])[
# 'geneName'].unique()
# num_by_gene_circ_type = map(len, circ_gene_type_num)
# circ_gene_type_num = circ_gene_type_num.reset_index()
# circ_gene_type_num.loc[:, 'name'] = circ_gene_type_num.circType + \
# '-' + circ_gene_type_num.gene_biotype
# by_gene_circ_type = pd.Series(
# num_by_gene_circ_type, index=circ_gene_type_num.name
# )
host_gene_stats = pd.concat(
[all_num, by_tr_type]
)
host_gene_stats.name = 'hostGene'
return host_gene_stats
def exonSizes_to_len(exonsizes):
exons = [int(each) for each in exonsizes.split(',')]
return sum(exons)
def circ_flank_intron(circ_df):
def flankIntron2size(flankIntron, strand):
# 1:7089216-7120193|1:7163371-7169514
intron_list = flankIntron.split('|')
if len(intron_list) == 1:
return [np.nan, np.nan]
try:
intron_cor_list = [each.split(':')[1].split('-')
if each != 'None'
else np.nan
for each in intron_list]
except IndexError:
print(intron_list)
sys.exit(1)
try:
intron_size = [int(each[1]) - int(each[0])
if isinstance(each, list)
else each
for each in intron_cor_list]
except TypeError:
print(intron_cor_list)
sys.exit(1)
if strand == '-':
intron_size = intron_size[::-1]
return intron_size
try:
        tmp = list(map(flankIntron2size,
                       circ_df.flankIntron,
                       circ_df.strand))  # materialise so the result can be iterated twice below
circ_df.loc[:, 'flankIntronSizeUP'] = [each[0] for each
in tmp]
circ_df.loc[:, 'flankIntronSizeDOWN'] = [each[1] for each
in tmp]
# circ_df.loc[:, 'flankIntronSize'] = tmp
# circ_df.loc[:, 'flankIntronSize'] = map(flankIntron2size,
# circ_df.flankIntron,
# circ_df.strand)
except ValueError:
print(len(circ_df.flankIntron), len(circ_df.strand), len(circ_df))
sys.exit(1)
return circ_df
# circ_df.loc[:, 'flankIntronSizeUP'] = [each[0] for each
# in circ_df.flankIntronSize]
# circ_df.loc[:, 'flankIntronSizeDOWN'] = [each[1] for each
# in circ_df.flankIntronSize]
@click.command()
@click.argument(
'circ_dir',
type=click.Path(file_okay=False, exists=True),
required=True
)
@click.argument(
'gene_type',
type=click.Path(dir_okay=False, exists=True),
required=True
)
@click.argument(
'tissue_sample',
type=click.Path(dir_okay=False, exists=True),
required=True
)
@click.argument(
'mapping_summary',
type=click.Path(dir_okay=False, exists=True),
required=True
)
@click.argument(
'exp_table',
type=click.Path(dir_okay=False, exists=True),
required=True
)
@click.argument(
'out_dir',
type=click.Path(file_okay=False),
default=os.getcwd()
)
@click.option(
'-s',
'--species',
type=click.STRING,
help='species latin',
required=True
)
@click.option(
'-a',
'--abbr',
type=click.STRING,
help='species abbr',
required=True
)
@click.option(
'-t',
'--circ_type',
type=click.Choice(['all_circ', 'circRNA', 'ciRNA']),
default='all_circ'
)
@click.option(
'--sup',
type=click.Path(dir_okay=False, exists=True),
default=None,
help='supplementary information to add to the end of output.'
)
def main(circ_dir, gene_type, out_dir, species, exp_table,
tissue_sample, mapping_summary, circ_type, abbr,
sup):
# make sure output dir exists
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
sp_en_name = SP_EN_NAME[species]
combined_circ_out = os.path.join(circ_dir, 'circ.combined.txt')
if not os.path.isfile(combined_circ_out):
circ_df = read_circ_table(circ_dir)
circ_df.to_csv(combined_circ_out, index=False, sep='\t')
else:
circ_df = pd.read_table(combined_circ_out)
circ_df = circ_df[circ_df.readNumber >= READ_CUTOFF]
if circ_type != 'all_circ':
circ_df = circ_df[circ_df.circType == circ_type]
gene_type_df = pd.read_table(gene_type)
circ_type_df = pd.merge(circ_df, gene_type_df,
left_on='isoformName', right_on='transcript_id',
how='left')
circ_type_df = circ_flank_intron(circ_type_df)
def get_circ_basic_stats(circ_type_df):
stats_type_df = circ_type_df.loc[:, ['chrom', 'start', 'end',
'circType', 'exonSizes',
'exonCount', 'transcript_biotype',
'geneName', 'flankIntronSizeUP',
'flankIntronSizeDOWN']].drop_duplicates()
stats_type_df.loc[:, 'length'] = stats_type_df.exonSizes.map(
exonSizes_to_len)
circ_num_stats = get_circ_num(stats_type_df)
exon_stats = get_circ_stat(stats_type_df,
'exonCount')
circ_len_stats = get_circ_stat(stats_type_df,
'length')
f_intron_up_stats = get_circ_stat(stats_type_df,
'flankIntronSizeUP')
f_intron_down_stats = get_circ_stat(stats_type_df,
'flankIntronSizeDOWN')
host_gene_stats = get_host_gene_num(stats_type_df)
junc_reads_stats = get_circ_stat(circ_type_df,
'readNumber', 'sum')
circ_merged_stats = pd.concat(
[circ_num_stats, exon_stats, circ_len_stats,
f_intron_up_stats, f_intron_down_stats,
host_gene_stats, junc_reads_stats], axis=1)
circ_merged_stats.index.name = 'Category'
return circ_merged_stats
circ_merged_stats = get_circ_basic_stats(circ_type_df)
stats_out_file = os.path.join(out_dir, '{sp}.{t}.stats.txt'.format(
t=circ_type,
sp=sp_en_name))
circ_merged_stats.to_csv(stats_out_file, sep='\t', float_format='%.2f',
na_rep='None')
# sample summary
sample_stats_file = os.path.join(
out_dir, '{t}.stats.sample.txt'.format(t=circ_type))
sample_stats_list = list()
for each_sample in circ_type_df.sample_id.unique():
each_sample_df = circ_type_df[circ_type_df.sample_id == each_sample]
each_sample_stats = get_circ_basic_stats(each_sample_df)
each_sample_stats.loc[:, 'sample_id'] = each_sample
sample_stats_list.append(each_sample_stats)
sample_df = pd.concat(sample_stats_list)
# sample_df = sample_df.reset_index().set_index(['sample_id', 'Category'])
# sample_out_df = sample_df.unstack('Category')
# mapping reads info
mapping_summary_df = pd.read_table(mapping_summary)
mapping_summary_df = mapping_summary_df.loc[:, ['Sample', 'total']]
mapping_summary_df.columns = ['sample_id', 'Total_reads']
sample_out_df = sample_df.reset_index().set_index(
['sample_id', 'Category'])
sample_out_df = sample_out_df.unstack(
'Category').fillna(0).stack('Category')
sample_out_df = pd.merge(sample_out_df.reset_index(), mapping_summary_df,
how='left')
sample_out_df.loc[:, 'readNumberPortion(%)'] = sample_out_df.readNumber / \
sample_out_df.Total_reads * 100
sample_out_df.loc[:, 'Species'] = sp_en_name
sample_out_df = sample_out_df.set_index('Species')
sample_out_df.to_csv(sample_stats_file, sep='\t',
float_format='%.3f', na_rep=0,
header=False)
# tissue summary
tissue_df = pd.read_table(tissue_sample, names=[
'tissue', 'sample_id'], header=None)
tissue_sample_num = tissue_df.groupby('tissue').size()
circ_tissue_type_df = pd.merge(circ_type_df, tissue_df,
how='left')
# circ_ids = ['{c}:{s}-{e}'.format(c=circ_tissue_type_df.loc[each, 'chrom'],
# s=circ_tissue_type_df.loc[each, 'start'],
# e=circ_tissue_type_df.loc[each, 'end'])
# for each in circ_tissue_type_df.index]
# circ_tissue_type_df.loc[:, 'circ_id'] = circ_ids
# multi_sample_tissues = tissue_sample_num[tissue_sample_num > 1].index
# if multi_sample_tissues.empty:
# pass
# else:
# circ_by_tissue = circ_tissue_type_df.groupby(
# ['tissue', 'circ_id']).size()
# circ_by_tissue_part1 = circ_by_tissue[circ_by_tissue > READ_CUTOFF]
# circ_by_tissue_part1_df = circ_tissue_type_df.set_index(
# ['tissue', 'circ_id'])
# circ_by_tissue_part1_df = circ_by_tissue_part1_df.loc[
# circ_by_tissue_part1.index].reset_index()
# single_sample_tissues = tissue_sample_num[tissue_sample_num == 1].index
# circ_by_tissue_part2_df = circ_tissue_type_df[
# circ_tissue_type_df.tissue.isin(single_sample_tissues)]
# circ_tissue_type_df = pd.concat(
# [circ_by_tissue_part1_df, circ_by_tissue_part2_df])
# tissue_stats_list = list()
# tissue_stats_file = os.path.join(
# out_dir, '{t}.stats.tissue.txt'.format(t=circ_type))
# for each_tissue in tissue_sample_num.index:
# each_tissue_df = circ_tissue_type_df[
# circ_tissue_type_df.tissue == each_tissue]
# each_tissue_stats = get_circ_basic_stats(each_tissue_df)
# each_tissue_stats.loc[:, 'tissue'] = each_tissue
# tissue_stats_list.append(each_tissue_stats)
# tissue_stats_df = pd.concat(tissue_stats_list)
# tissue_stats_df = tissue_stats_df.reset_index(
# ).set_index(['tissue', 'Category'])
# tissue_out_df = tissue_stats_df.unstack('Category')
# tissue_out_df.to_csv(tissue_stats_file, sep='\t',
# float_format='%.2f', na_rep=0)
# tissue circ_table
circ_tissue_type_df = circ_tissue_type_df.sort_values(
['chrom', 'start', 'end'])
circ_name = circ_tissue_type_df.groupby(['chrom', 'start', 'end']).size()
circ_name.name = 'circ_count'
circ_name = circ_name.reset_index()
circ_name.loc[:, 'circ_name'] = [
'{sp}_circ_{num:0>6}'.format(sp=abbr,
num=each + 1)
for each in circ_name.index]
circ_name_df = pd.merge(circ_tissue_type_df, circ_name)
circ_name_df.loc[:, 'length'] = circ_name_df.exonSizes.map(
exonSizes_to_len)
exp_table_df = | pd.read_table(exp_table, index_col=0) | pandas.read_table |
from fctest.__EISData__ import EISData
import pandas as pd
import os
import numpy as np
class AutoLEISData(EISData):
ENCODING = "ISO-8859-1"
def __init__(self, data_path, mea_area):
raw_data = pd.read_csv(data_path, sep='\t')
raw_data = raw_data.iloc[:, 0].str.split(',', expand=True)
data_section = raw_data.iloc[10:, :]
data_section.columns = ['freq', 'ampl', 'bias', 'time', 'z_re', 'z_im', 'gd', 'err', 'range']
# test_date = pd.to_datetime(raw_data.iloc[2, 2] + ' ' + raw_data.iloc[3, 2])
# mea_area = float(raw_data.iloc[12, 2])
# initial_freq = float(raw_data.iloc[8, 2])
# final_freq = float(raw_data.iloc[9, 2])
# pts_per_decade = float(raw_data.iloc[10, 2])
# relevant parameters
freqs = pd.to_numeric(data_section.freq).values
z_real = pd.to_numeric(data_section.z_re).values
z_im = | pd.to_numeric(data_section.z_im) | pandas.to_numeric |
"""
step04.py: Clarify and Merge Synapse Data
"""
import argparse
import pandas
import step00
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("clinical", type=str, help="Clinical CSV file")
parser.add_argument("expression", type=str, help="Expression CSV file(s)", nargs="+")
parser.add_argument("output", type=str, help="Output TAR.gz file")
args = parser.parse_args()
if not args.clinical.endswith(".csv"):
raise ValueError("CLINICAL must end with .csv!!")
elif list(filter(lambda x: not x.endswith(".csv"), args.expression)):
raise ValueError("CLINICAL must end with .csv!!")
# read data
clinical_data = pandas.read_csv(args.clinical)
clinical_data.set_index("patientID", inplace=True)
clinical_data["ECOGPS"] = list(map(lambda x: float(x) if step00.can_convert_to_float(x) else None, list(clinical_data["ECOGPS"])))
clinical_data["TMB"] = list(map(lambda x: float(x) if step00.can_convert_to_float(x) else None, list(clinical_data["TMB"])))
clinical_data.columns = list(map(lambda x: "Clinical_" + x, list(clinical_data.columns)))
clinical_data.sort_index(axis="index", inplace=True)
data_list = [clinical_data]
for i, expression_file in enumerate(sorted(args.expression)):
tmp_data = pandas.read_csv(expression_file)
tmp_data.set_index(list(tmp_data.columns)[0], inplace=True)
tmp_data = tmp_data.T
tmp_data.columns = list(map(lambda x: str(i) + "_" + x, list(tmp_data.columns)))
tmp_data.sort_index(axis="index", inplace=True)
data_list.append(tmp_data)
output_data = | pandas.concat(data_list, axis="columns", join="inner", verify_integrity=True) | pandas.concat |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# Import modules
from models import get_model_cnn_crf,get_model,get_model_cnn
import numpy as np
from utils import gen, chunker, WINDOW_SIZE, rescale_array, rescale_wake
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.models import load_model
from sklearn.metrics import f1_score, accuracy_score, classification_report
import tensorflow as tf
from glob2 import glob
import os
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import matplotlib # Added to patch mpl import bug for Python 2
matplotlib.use('agg') # Added to patch mpl import bug for Python 2
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import pandas as pd
np.set_printoptions(precision=2)
#This Python script trains a model to predict sleep stages.
#It contains 4 functions:
# Training the model: train_model()
# Calculate Accuracy of the model: eval_model()
# Cross validate the model: cross_validation_training()
# Repeats the train & test pipeline 20 times, once for each possible train/test partition
# ---> This is only needed if we want to validate our model but not necessary for deployment.
# Training the model with all the data: full_training()
def train_model(train_files, model_save_path):
""" Main function to train model.
Inputs:
- train_files: files we will use to train our model.
- model_save_path: path to save the model.
"""
    #Split the data between training and validation; test_size=0.1 holds out 10% of the files for validation.
# ---> train_test_split is a function that randomly splits the data. For now we won't cherry pick our random state.
train,val=train_test_split(train_files, test_size=0.1)#, random_state=1337)
# Load all our train data
train_dict = {k: np.load(k) for k in train}
# Load all our validation data
val_dict = {k: np.load(k) for k in val}
print("Validating: "+str(val_dict))
#The model architecture has 3 repeated sets of two 1-D convolutional (Conv1D) layers, 1-D max-pooling and spatial dropout layers.
# This is followed by two Conv1D, 1-D global max-pooling, dropout and dense layers. We finally have a dropout layer as the output of "Base-CNN".
# This is fed to the Time-Distributed Base-CNN model, and then a 1-D convolutional layer, spatial dropout, another 1-D convolutional layer, dropout, 1D conv and finally the multiclass sleep labels.
model = get_model_cnn()
# Training
    #This helps avoid overfitting: after every epoch, keep the checkpoint with the best validation accuracy seen so far.
checkpoint = ModelCheckpoint(model_save_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=20, verbose=1)
#Learning rate is reduced each time the validation accuracy plateaus using ReduceLROnPlateau Keras Callbacks.
redonplat = ReduceLROnPlateau(monitor="val_acc", mode="max", patience=5, verbose=2)
callbacks_list = [checkpoint, redonplat]
model.fit_generator(gen(train_dict, aug=False), validation_data=gen(val_dict), epochs=25, verbose=2, steps_per_epoch=1000, validation_steps=300, callbacks=callbacks_list)
#And finally we save our model!
model.save(model_save_path)
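# Hedged sketch (not part of the original pipeline): a minimal Keras version of
# the "Base-CNN" block described in the comments above. The real architecture
# comes from models.get_model_cnn(); the input shape, filter counts, kernel
# sizes and dropout rates below are illustrative assumptions only.
def _sketch_base_cnn(window_size=WINDOW_SIZE, n_dense=64):
    from keras.layers import (Input, Conv1D, MaxPool1D, SpatialDropout1D,
                              GlobalMaxPool1D, Dropout, Dense)
    from keras.models import Model
    inp = Input(shape=(window_size, 1))
    x = inp
    # three repeated sets of two Conv1D layers, max-pooling and spatial dropout
    for filters in (32, 64, 128):
        x = Conv1D(filters, kernel_size=5, padding='same', activation='relu')(x)
        x = Conv1D(filters, kernel_size=5, padding='same', activation='relu')(x)
        x = MaxPool1D(pool_size=2)(x)
        x = SpatialDropout1D(0.1)(x)
    # two Conv1D layers, global max-pooling, dropout and a dense layer
    x = Conv1D(256, kernel_size=3, padding='same', activation='relu')(x)
    x = Conv1D(256, kernel_size=3, padding='same', activation='relu')(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(0.3)(x)
    x = Dense(n_dense, activation='relu')(x)
    out = Dropout(0.3)(x)  # this dropout output feeds the Time-Distributed wrapper
    return Model(inp, out)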
def eval_model(model , test_files):
""" Main function to evaluate (estimate accuracy) of the model.
Inputs:
- model: the model to train
- test_files: files we will use to test our model (in this case 1 person).
"""
test_dict = {k: np.load(k) for k in test_files}
print("Testing: "+str(test_dict))
    #Validation
    gt = []     # ground-truth labels accumulated across records
    preds = []  # predicted labels accumulated across records
    for record in tqdm(test_dict):
all_rows = test_dict[record]['x']
record_y_gt = []
record_y_pred = []
# for batch_hyp in chunker(range(all_rows.shape[0])): # for batchwise chunking
X = all_rows #[min(batch_hyp):max(batch_hyp)+1, ...]
Y = test_dict[record]['y'] #[min(batch_hyp):max(batch_hyp)+1]
wakeStd=rescale_wake(X,Y)
X = np.expand_dims(X, 0)
X=(X-np.mean(X))/wakeStd
# X = rescale(X, Y) #default
Y_pred = model.predict(X)
Y_pred = Y_pred.argmax(axis=-1).ravel().tolist()
gtNow=Y.ravel().tolist()
gt += gtNow
preds += Y_pred
record_y_gt += Y.ravel().tolist()
record_y_pred += Y_pred
acc_Test = accuracy_score(gtNow, Y_pred)
f1_Test = f1_score(gtNow, Y_pred, average="macro")
print("acc %s, f1 %s"%(acc_Test, f1_Test))
return gt, preds
# Training the model and prediction
def cross_validation_training():
""" Function to cross-validate the model.
"""
base_path = "../data/eeg_fpz_cz" # We used the expanded Sleep-EDF database from Physionet bank. Single-channel EEG (Fpz-Cz at 100Hz) of 20 subjects.
model_save_path = "Sleep_CNN_33.h5" # Model generated with 19 subjects (2 nights par subject) as our training-set (33 nights in total because it's 90%).
files = sorted(glob(os.path.join(base_path, "*.npz")))
subject_ids = list(set([x.split("/")[-1][:5] for x in files])) #Extract user ID from file name. (-1 is the last column, and :5 is the first 5 chars). The result is: 20 subject ids: SC400, SC401, SC402, SC403, SC404, SC405, SC406, SC407, SC408, SC409, SC410, SC411, SC412, SC413, SC414, SC415, SC416, SC417, SC418, SC419. list->set-> gets rid of duplicates.
list_f1 = []
list_acc = []
allpreds = []
allgt = []
#Loop over subject which will be used as test.
for i in subject_ids:
test_id = set([i]) #Test id
all_subjects = set(subject_ids)
        train_ids = all_subjects - test_id # train_ids contains all subject ids except the held-out test subject (set([i]))
#From the ids that we have found, let's get the actual files and collect the data for training and test.
train_files, test_files = [x for x in files if x.split("/")[-1][:5] in train_ids],\
[x for x in files if x.split("/")[-1][:5] in test_id]
#Once we know the ids we will use for our train data:
#LET'S TRAIN THE MODEL!
train_model(train_files, model_save_path) #It trains the model and saves it in the same path.
#Once we have our model:
#LET'S EVALUATE THE MODEL (for this specific subject)!
gt, preds = eval_model(load_model(model_save_path), test_files)
allpreds += preds
allgt += gt
f1 = f1_score(allgt, allpreds, average="macro")
| pd.DataFrame(allgt) | pandas.DataFrame |
""" module generates schemas and lookups used in dropdowns """
__author__ = 'etuka'
from django.core.management.base import BaseCommand
import os
import glob
import json
import pandas as pd
import xml.etree.ElementTree as ET
from dal.copo_base_da import DataSchemas
from dal.mongo_util import get_collection_ref
from web.apps.web_copo.lookup.resolver import RESOLVER
import web.apps.web_copo.schemas.utils.data_utils as d_utils
from web.apps.web_copo.schemas.utils.data_formats import DataFormats
Schemas = get_collection_ref("Schemas")
Lookups = get_collection_ref("Lookups")
drop_downs_pth = RESOLVER['copo_drop_downs']
class Command(BaseCommand):
help = 'Generate UI schemas and dropdown lookups'
def handle(self, *args, **options):
# generate ui schemas
self.generate_ui_schemas()
self.convert_crp_list()
self.generate_lookup_datasource()
self.stdout.write(self.style.SUCCESS('Successfully generated schemas'))
def generate_ui_schemas(self):
"""
function generates ui schemas
:return:
"""
# instantiate data schema
data_schema = DataSchemas("COPO")
# generate core schemas
temp_dict = DataFormats("COPO").generate_ui_template()
# store schemas in DB
if temp_dict["status"] == "success" and temp_dict["data"]:
data_schema.add_ui_template(temp_dict["data"])
return True
def convert_crp_list(self):
"""
converts cgiar crp from csv to json
:return:
"""
try:
df = pd.read_csv(os.path.join(drop_downs_pth, 'crp_list.csv'))
except Exception as e:
self.stdout.write(self.style.ERROR('Error retrieving schema resource: ' + str(e)))
return False
# 'Platform_no', 'Operating_name', 'Official_name', 'Standard_reference', 'Lead_center', 'Class'
df.columns = [x.lower() for x in df.columns]
df['label'] = df['official_name']
df['value'] = df['operating_name']
df['description'] = "<div>Platform number: " + df['platform_no'].astype(str) + \
"</div><div>Standard reference: " + df['standard_reference'].astype(str) + \
"</div><div>Operating name: " + df['operating_name'].astype(str) + \
"</div><div>Lead center: " + df['lead_center'].astype(str) + \
"</div>Class: " + df['class'].astype(str)
df = df[['label', 'value', 'description']]
result = df.to_dict('records')
try:
with open(os.path.join(drop_downs_pth, 'crp_list.json'), 'w') as fout:
json.dump(result, fout)
except Exception as e:
self.stdout.write(self.style.ERROR('Error writing crp_list.json: ' + str(e)))
return False
return True
def generate_lookup_datasource(self):
dispatcher = {
'agrovoclabels': self.agrovoc_datasource,
'countrieslist': self.countrieslist_datasource,
'mediatypelabels': self.mediatype_datasource,
'fundingbodies': self.fundingbodies_datasource
}
for k in dispatcher.keys():
# drop existing records of type
Lookups.remove({"type": k})
try:
result_df = dispatcher[k]()
result_df['type'] = k
result_df = result_df[['accession', 'label', 'description', 'type', 'tags']]
Lookups.insert_many(result_df.to_dict('records'))
except Exception as e:
print(e)
def agrovoc_datasource(self):
"""
function generates data source for Agrovoc terms lookup
:return:
"""
data = d_utils.json_to_pytype(os.path.join(drop_downs_pth, 'agrovocLabels.json'))["bindings"]
data_df = pd.DataFrame(data)
data_df['accession'] = data_df['uri'].apply(lambda x: x.get('value', str()))
data_df['label'] = data_df['label'].apply(lambda x: x.get('value', str()))
data_df['description'] = '<table style="width:100%"><tr><td>Label</td><td>' + data_df[
'label'] + '</td></tr><tr><td>Accession</td><td>' + data_df['accession'] + '</td></table>'
data_df['tags'] = [''] * len(data_df)
return data_df
def countrieslist_datasource(self):
"""
function generates data source for lookup of countries
:return:
"""
data = d_utils.json_to_pytype(os.path.join(drop_downs_pth, 'countries.json'))["bindings"]
data_df = | pd.DataFrame(data) | pandas.DataFrame |
import datetime
from time import sleep
import pandas as pd
from loguru import logger
import ofanalysis.const as const
import ofanalysis.utility as ut
import tushare as ts
class TSDataUpdate:
def __init__(self, ts_pro_token:str):
self.__pro = ts.pro_api(ts_pro_token)
self.__today = datetime.date.today()
def retrieve_all(self):
self.retrieve_stock_basic()
self.retrieve_stock_daily_basic()
self.retrieve_stock_daily()
self.retrieve_fund_basic()
self.retrieve_fund_nav()
self.retrieve_fund_share()
self.retrieve_fund_manager()
self.retrieve_fund_portfolio()
def retrieve_stock_basic(self):
        logger.info('Full refresh of stock basic info: stock_basic')
        # read the data in pages
df_stock_basic = pd.DataFrame()
i = 0
        while True:  # read the data in pages
df_batch_result = self.__pro.stock_basic(**{
"ts_code": "",
"name": "",
"exchange": "",
"market": "",
"is_hs": "",
"list_status": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"symbol",
"name",
"area",
"industry",
"market",
"list_date",
"is_hs",
"delist_date",
"list_status",
"curr_type",
"exchange",
"cnspell",
"enname",
"fullname"
])
if len(df_batch_result) == 0:
break
df_stock_basic = pd.concat([df_stock_basic, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
        ut.db_del_dict_from_mongodb(  # full (non-incremental) update: clear existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
target_dict=df_stock_basic.to_dict(orient='records')
)
def retrieve_stock_daily_basic(self):
        check_field = 'trade_date'  # field used to determine which dates already exist (incremental update)
        logger.info('Updating daily stock indicators: stock_daily_basic')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
field=check_field
)
        if len(existed_records) == 0:  # empty collection
            trade_cal_start_date = '20000101'
        else:
            existed_records.sort(reverse=True)  # sort in descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
            logger.info('Updating stock_daily_basic: data for %s' % date)
            df_daily = pd.DataFrame()
            i = 0
            while True:  # read the data in pages
                for _ in range(const.RETRY_TIMES):  # retry on transient API errors
try:
df_batch_daily = self.__pro.daily_basic(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"trade_date",
"close",
"turnover_rate",
"turnover_rate_f",
"volume_ratio",
"pe",
"pe_ttm",
"pb",
"ps",
"ps_ttm",
"dv_ratio",
"dv_ttm",
"total_share",
"float_share",
"free_share",
"total_mv",
"circ_mv"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = | pd.concat([df_daily, df_batch_daily], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue( | com.is_float_dtype(res3['ItemE'].values) | pandas.core.common.is_float_dtype |
import datetime
from datetime import date
import pandas as pd
import numpy as np
import requests
from pandas.tseries.offsets import BDay
from mip import Model, xsum, minimize, BINARY, maximize
import re
api_key = '30d9085988663142ce4cb478d09e6d00'
def next_weekday(d, weekday):
days_ahead = weekday - d.weekday()
if days_ahead <= 0:
days_ahead += 7
return d + datetime.timedelta(days_ahead)
def get_calendar_df(event_df):
next_monday = next_weekday(date.today(), 0)
upcoming_week_df = pd.DataFrame(index=pd.date_range(next_monday, periods=5, freq='D'))
event_df = event_df.groupby('Datum').count()
cal_df = upcoming_week_df.join(event_df).fillna(0).astype(int)
return cal_df
def get_dates():
today = | pd.to_datetime('today') | pandas.to_datetime |
'''
Copyright (c) 2020, <NAME>, Sunnybrook Research Institute
Script that will run statistical tests for comparing tokenizers with the
Mann-Whitney U-test or classifiers with the McNemar test.
Input: 2 or more .xlsx files of different model testing results.
Output: NLPRR_ExperimentsSummary.csv containing a comparison of descriptive
statistics, and StatsTests_<stats test>.csv containing the multiple
comparison testing results of the chosen test.
'''
import argparse
import pandas as pd
import os
from tqdm import tqdm
from statsmodels.stats.multicomp import MultiComparison
from sklearn.metrics import confusion_matrix
from statsmodels.stats.contingency_tables import mcnemar
from scipy.stats.stats import FriedmanchisquareResult
from scipy.stats import mannwhitneyu
def mcnemar_test(a,b):
ct = confusion_matrix(a,b)
test = mcnemar(ct)
return FriedmanchisquareResult(statistic=test.statistic,
pvalue=test.pvalue)
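# Hedged sketch (not part of the original script): shows how a two-sample test
# such as scipy's mannwhitneyu, or the mcnemar_test wrapper above, can be fed
# to statsmodels' MultiComparison for pairwise testing with p-value correction.
# The scores and model names below are made up for illustration.
def _pairwise_test_example():
    scores = pd.DataFrame({
        'value': [0.91, 0.92, 0.90, 0.88, 0.87, 0.89, 0.93, 0.94, 0.92],
        'model': ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C'],
    })
    mc = MultiComparison(scores['value'], scores['model'])
    # allpairtest runs the supplied test on every pair of models and applies
    # the chosen multiple-comparison correction (Bonferroni here)
    return mc.allpairtest(mannwhitneyu, method='bonf')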
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str,
help="number of epochs of training")
parser.add_argument("--stat_test", type=str, default='MannWhitney',
help="Adhoc test performed. ")
opt = parser.parse_args()
print('-'*80)
print(opt)
print('-'*80)
# Building a summary spreadsheet of the results
filelist = []
for root, dirs, files in os.walk(opt.folder):
for file in files:
if file.endswith('.xlsx'):
#append the file name to the list
filelist.append(os.path.join(root,file))
new_df = {'FieldExtraction':[],
'Model': [],
'Accuracy': [],
'G.F1': [],
'Weighted_F1': [],
'Weighted_precision': [],
'Weighted_recall': []}
for file in tqdm(filelist):
df = | pd.read_excel(file, engine='openpyxl', sheet_name='Summary_Metrics') | pandas.read_excel |
import json
import pandas as pd
with open(r'data\unique_authors_list_full.json') as json_file:
unique_authors = json.load(json_file)
with open(r'data\n_unique_authors_full.json') as json_file:
n_unique_authors = json.load(json_file)
print(f"Current len of unique authors : {len(unique_authors)}")
replacement={}
for key,val in unique_authors.items():
if n_unique_authors[key]>50:
replacement[key]=val
unique_authors=replacement
print(f"After removing len is {len(unique_authors)}")
for key, val in unique_authors.items():
authors=val.split(",")
unique_authors[key]=authors
labels=[]
columns=[]
strength_cols=[]
for key,val in unique_authors.items():
strengths=[]
column=[]
labels.append(key)
for key2,val2 in unique_authors.items():
n_common=len(set(val).intersection(val2))
strength=n_common/min(len(val),len(val2))
column.append(n_common)
strengths.append(strength)
strength_cols.append(strengths)
columns.append(column)
df_strengths= | pd.DataFrame(strength_cols,index=labels,columns=labels) | pandas.DataFrame |
# *-* coding: utf-8 *-*
"""Read binary data from the IRIS Instruments Syscal Pro system
TODO: Properly sort out handling of electrode positions and conversion to
electrode numbers.
"""
import struct
from io import StringIO
import logging
import pandas as pd
import numpy as np
from reda.importers.utils.decorators import enable_result_transforms
logger = logging.getLogger(__name__)
def _convert_coords_to_abmn_X(data, **kwargs):
"""The syscal only stores positions for the electrodes. Yet, we need to
infer electrode numbers for (a,b,m,n) by means of some heuristics. This
heuristic uses the x-coordinates to infer an electrode spacing (y/z
coordinates are ignored). We also assume a constant spacing of electrodes
(i.e., a gap in electrode positions would indicate unused electrodes). This
is usually a good estimate as hardly anybody does change the electrode
positions stored in the Syscal system (talk to us if you do).
Note that this function can use user input to simplify the process by using
a user-supplied x0 value for the smallest electrode position (corresponding
to electrode 1) and a user-supplied spacing (removing the need to infer
from the positions).
Parameters
----------
data : Nx4 array|Nx4 :py:class:`pandas.DataFrame`
The x positions of a, b, m, n electrodes. N is the number of
measurements
x0 : float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing : float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
Returns
-------
data_new : Nx4 :py:class:`pandas.DataFrame`
The electrode number columns a,b,m,n
"""
assert data.shape[1] == 4, 'data variable must only contain four columns'
x0 = kwargs.get(
'x0',
data.min().min()
)
electrode_spacing = kwargs.get('spacing', None)
# try to determine from the data itself
if electrode_spacing is None:
electrode_positions = data.values
electrode_spacing = np.abs(
electrode_positions[:, 1:] - electrode_positions[:, 0:-1]
).min()
data_new = pd.DataFrame()
data_new['a'] = (data.iloc[:, 0] - x0) / electrode_spacing + 1
data_new['b'] = (data.iloc[:, 1] - x0) / electrode_spacing + 1
data_new['m'] = (data.iloc[:, 2] - x0) / electrode_spacing + 1
data_new['n'] = (data.iloc[:, 3] - x0) / electrode_spacing + 1
# convert to integers
for col in (('a', 'b', 'm', 'n')):
data_new[col] = data_new[col].astype(int)
return data_new
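# Hedged usage sketch (positions are illustrative only): with x0=0 and a 1 m
# spacing, a single reading at x-positions 0, 3, 1, 2 m maps to electrodes
# a=1, b=4, m=2, n=3:
#   _convert_coords_to_abmn_X(pd.DataFrame([[0.0, 3.0, 1.0, 2.0]]),
#                             x0=0.0, spacing=1.0)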
@enable_result_transforms
def import_txt(filename, **kwargs):
"""
Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
"""
if 'spacing' not in kwargs:
logger.warning(' '.join((
'spacing keyword is not set.',
'Make sure that ALL electrodes are used in the data!',
'Otherwise problems will arise!',
)))
# read in text file into a buffer
with open(filename, 'r') as fid:
text = fid.read()
strings_to_replace = {
'Mixed / non conventional': 'Mixed/non-conventional',
'Date': 'Date Time AM-PM',
}
for key in strings_to_replace.keys():
text = text.replace(key, strings_to_replace[key])
buffer = StringIO(text)
# read data file
data_raw = pd.read_csv(
buffer,
# sep='\t',
delim_whitespace=True,
)
# clean up column names
data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
# generate electrode positions
data = _convert_coords_to_abmn_X(
data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']],
**kwargs
)
# [mV] / [mA]
data['r'] = data_raw['Vp'] / data_raw['In']
data['Vmn'] = data_raw['Vp']
data['Iab'] = data_raw['In']
# rename electrode denotations
rec_max = kwargs.get('reciprocals', None)
if rec_max is not None:
print('renumbering electrode numbers')
data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
return data, None, None
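# Hedged usage sketch (file name is illustrative):
#   data, _, _ = import_txt('profile.txt', spacing=1.0)
# returns a DataFrame with the a, b, m, n electrode numbers plus r, Vmn and Iab.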
@enable_result_transforms
def import_bin(filename, **kwargs):
"""
Read a .bin file generated by the IRIS Instruments Syscal Pro System and
return a curated dataframe for further processing. This dataframe contains
only information currently deemed important. Use the function
reda.importers.iris_syscal_pro_binary._import_bin to extract ALL
information from a given .bin file.
Parameters
----------
filename : string
path to input filename
x0 : float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing : float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals : int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
check_meas_nums : bool
if True, then check that the measurement numbers are consecutive. Don't
return data after a jump to smaller measurement numbers (this usually
indicates that more data points were downloaded than are part of a
specific measurement. Default: True
skip_rows : int
Ignore this number of rows at the beginning, e.g., because they were
inadvertently imported from an earlier measurement. Default: 0
Returns
-------
data : :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes : :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography : None
No topography information is contained in the text files, so we always
return None
"""
metadata, data_raw = _import_bin(filename)
skip_rows = kwargs.get('skip_rows', 0)
if skip_rows > 0:
data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True)
data_raw = data_raw.reset_index()
if kwargs.get('check_meas_nums', True):
# check that first number is 0
if data_raw['measurement_num'].iloc[0] != 0:
print('WARNING: Measurement numbers do not start with 0 ' +
'(did you download ALL data?)')
# check that all measurement numbers increase by one
        if not np.all(np.diff(data_raw['measurement_num']) == 1):
logger.warning(' '.join((
'WARNING',
'Measurement numbers are not consecutive.',
'Perhaps the first measurement belongs to another'
'measurement?',
'Use the skip_rows parameter to skip those measurements'
)))
# import IPython
# IPython.embed()
# now check if there is a jump in measurement numbers somewhere
# ignore first entry as this will always be nan
diff = data_raw['measurement_num'].diff()[1:]
jump = np.where(diff != 1)[0]
if len(jump) > 0 and not np.all(data_raw['measurement_num'] == 0):
logger.warning(
'WARNING: One or more jumps in measurement numbers detected')
logger.warning('The jump indices are:')
for jump_nr in jump:
logger.warning(jump_nr)
logger.info('Removing data points subsequent to the first jump')
data_raw = data_raw.iloc[0:jump[0] + 1, :]
if data_raw.shape[0] == 0:
# no data present, return a bare DataFrame
return | pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']) | pandas.DataFrame |
"""Runs experiments on CICIDS-2017 dataset."""
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
import sklearn
import tqdm
from tqdm import tqdm
from tqdm import tqdm_notebook
#import xgboost as xgb
from incremental_trees.models.classification.streaming_rfc import StreamingRFC
import time
import tensorflow as tf
import sys
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import os # accessing directory structure
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.models import Sequential
from keras.layers import Dense
import pickle as pkl
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
nRowsRead = None
# Some hardcoded parameters:
tf.compat.v1.flags.DEFINE_integer('sample', 10000, '')
tf.compat.v1.flags.DEFINE_boolean('notebook', False, '')
tf.compat.v1.flags.DEFINE_integer('num_steps', 1, 'number of training step per new batch in online learning.')
tf.compat.v1.flags.DEFINE_integer('n_batch_to_retrain', 1, 'number of old batch to retrain in online learning.')
tf.compat.v1.flags.DEFINE_integer('batch_size', 256, '')
tf.compat.v1.flags.DEFINE_string('run', '8,9,10,11', '')
FLAGS = tf.compat.v1.flags.FLAGS
progress_bar = tqdm
df_cache = None
# A little hack
print_sys = print
def print(s):
print_sys(s)
with open('log.txt', 'a') as f:
f.write(s + '\n')
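# Note: the wrapper above only accepts a single string argument, so multi-argument
# calls such as print('a', b) would raise a TypeError with this override in place.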
def load_data(sampled_instances=10000):
"""Returns sampled cicids data as pd.df."""
global df_cache
if df_cache is not None:
return df_cache
df1 = pd.read_csv("Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
df2 = pd.read_csv("Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv")
df3 = pd.read_csv("Friday-WorkingHours-Morning.pcap_ISCX.csv")
df4 = pd.read_csv("Monday-WorkingHours.pcap_ISCX.csv")
df5 = pd.read_csv(
"Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv")
df6 = pd.read_csv("Thursday-WorkingHours-Morning-WebAttacks.pcap_ISCX.csv")
df7 = pd.read_csv("Tuesday-WorkingHours.pcap_ISCX.csv")
df8 = pd.read_csv("Wednesday-workingHours.pcap_ISCX.csv")
df = pd.concat([df1, df2])
del df1, df2
df = pd.concat([df, df3])
del df3
df = pd.concat([df, df4])
del df4
df = pd.concat([df, df5])
del df5
df = pd.concat([df, df6])
del df6
df = | pd.concat([df, df7]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
def join_cols(row):
return ''.join(list(row))
def find_favor(seq):
t = []
for m in re.finditer(seq, DNA):
t += [m.start()]
return t
DNA = np.loadtxt('./data/DNA.txt', str)
DNA = ''.join(DNA)
print('DNA Length = {} '.format(len(DNA)) )
start_idxs = []
for m in re.finditer('GTC', DNA):
start_idxs += [m.start()]
start_idxs = np.array(start_idxs)
df = pd.DataFrame()
df['loc'] = np.arange(len(DNA))
df['start_ind'] = 0
df.loc[start_idxs,'start_ind'] = 1
favor = pd.read_csv('./data/favor_seqs.csv')
gtc_loc = list(favor.iloc[0,:])[0].find('GTC')
red_idxs = []
for detsize in range(3,4):
dets = favor['seq'].str[ gtc_loc-detsize:gtc_loc + 3 + detsize]
dets = list(np.unique(dets))
detslocs = list(map(find_favor, dets))
detslocs = [x for x in detslocs if len(x) > 1]
for tlocs in detslocs:
mean_dist = np.mean(np.diff(tlocs))
median_dist = np.median(np.diff(tlocs))
if(mean_dist > 1000 and mean_dist < 6000
or
median_dist > 1000 and median_dist < 6000):
red_idxs += [tlocs]
red_idxs = [item for sublist in red_idxs for item in sublist]
plt.figure(figsize=(16,4))
plt.bar(start_idxs, [0.3]*len(start_idxs), width=64, color='black', alpha=0.8)
plt.bar(red_idxs, [1]*len(red_idxs), width=64, color='red')
plt.ylim([0,1])
plt.xlim([0,len(DNA)])
plt.xlabel('DNA nucleotide index')
plt.yticks([])
plt.xticks([])
plt.title('\"Interesting\" Sequences')
plt.legend(['GTC Locations','Interesting Frequency Locations'], facecolor=(1,1,1,1), framealpha=0.98 )
plt.savefig('./out/favor_seqs_k_3.png')
plt.show()
#%% Prim VS Primon when POLY is saturated
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def ms(t):
return t/np.max(t)
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-4]) ))
tcols = df.columns
tcols = list(tcols[:-4]) + ['poly','prim','primo','seq']
df.columns = tcols
df['primo-prim'] = df['primo'] - df['prim']
labels = ['poly','primo','prim','primo-prim']
df = df.sort_values('poly').reset_index(drop=True)
sm = 100
plt.figure(figsize=(12,8))
for i, lab in enumerate(labels):
plt.subplot(4,1,i+1)
if(i != 3):
df = df.sort_values(lab).reset_index(drop=True)
y = df[lab].copy()
if(i != 3):
y = mms( y )**0.5
y = y.rolling(sm).mean().drop(np.arange(sm)).reset_index(drop=True)
y = pd.Series(y)
plt.plot(np.arange(len(y)),y, alpha=0.8)
plt.title(lab + ' sorted by self')
plt.ylabel(' ln(score)' )
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)
#%% Collect favorite sequences
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
labels = ['poly','primo','prim']
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-3]) ))
# keep favorite sequences (1000~6000 reps)
df_test = pd.read_csv('./data/validation.csv')
df.index = df['seq']
df = df.loc[df_favor['seq'],:]
df = df.dropna(axis=0).reset_index(drop=True)
df.columns = list(df.columns[:-4]) + ['poly', 'prim', 'primo', 'seq']
# keep non test set sequences
toDrop = df_test['seq']
df.index = df['seq']
df = df.drop(toDrop, axis=0, errors='ignore')
df = df.reset_index(drop=True)
print("let's unite the data by seq and look at the mean and std of each sequence")
dfm = pd.DataFrame()
dfm['primo'] = mms(df.groupby('seq').median()['primo'])
dfm['primo_std'] = mms(df.groupby('seq').std()['primo'])#/mms( df.groupby('seq').mean()['primo'] )
dfm['prim'] = mms(df.groupby('seq').median()['prim'])
dfm['prim_std'] = mms(df.groupby('seq').std()['prim'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['poly'] = mms(df.groupby('seq').median()['poly'])
dfm['poly_std'] = mms(df.groupby('seq').std()['poly'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['seq'] = dfm.index
dfm = dfm.reset_index(drop=True)
T1 = np.percentile(dfm['primo'], 95)
T2 = np.percentile(dfm['primo_std'], 90)
T3 = np.percentile(dfm['prim'], 95)
T4 = np.percentile(dfm['prim_std'], 90)
T5 = np.percentile(dfm['poly'], 95)
T6 = np.percentile(dfm['poly_std'], 90)
print('length of dfm before outlier cleaning = {}'.format(len(dfm)) )
dfm = dfm.drop(np.where(dfm['primo'] > T1 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['primo_std'] > T2 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim'] > T3 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim_std'] > T4 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly'] > T5 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly_std'] > T6 )[0]).reset_index(drop=True)
print('length of dfm after outlier cleaning = {}'.format(len(dfm)) )
nucs = np.array(list(map(list, dfm['seq']))).copy()
nucs = pd.DataFrame(nucs.copy())
nucs = nucs.add_suffix('_nuc')
nucs = nucs.reset_index(drop=True)
dfm = pd.concat([dfm, nucs], axis=1)
dfm = dfm.reset_index(drop=True)
toKeep = [x for x in dfm.columns if 'std' not in x]
dfm = dfm.loc[:,toKeep]
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab])
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab]**0.5)
dfm.to_csv('data/chip_B_favor.csv', index=False)
#%% Heatmap of ABS Correlation
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def count_letters(df_nucs, rep_dict):
X = df_nucs.copy()
X = X.replace(rep_dict)
X = np.array(X)
X = np.sum(X,1)
return X
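# Illustrative sketch: with rep_dict = {'A': 1, 'C': 0, 'G': 1, 'T': 0},
# count_letters returns per-row counts of A/G letters, e.g. a row
# ['A', 'G', 'T', 'C'] yields 2.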
df = pd.read_csv('data/chip_B_favor.csv')
cols = df.columns
cols = [x for x in cols if 'nuc' in x]
df_nucs = df.loc[:,cols].copy()
df_labels = df.loc[:,['primo','prim','poly']]
df_res = pd.DataFrame()
# count appereances of each individual letter
for letter in ['A','C','G','T']:
rep_dict = {'A':0,'C':0,'G':0,'T':0}
rep_dict[letter] = 1
df_res['{}_count'.format(letter) ] = count_letters(df_nucs, rep_dict)
gtc_ind_start = ''.join( list(df_nucs.iloc[0,:]) ).find('GTC') - 5
gtc_ind_end = gtc_ind_start + 5 + 3 + 5
# extract purine and pyrimidine densities
# A,G purines
# C,T pyrimidines
""" =================== Left Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Left_Pur_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Left_Pry_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
""" =================== Center / Determinant Count ===================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Center_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Center_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
""" =================== Right Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Right_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Right_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
df_res = pd.concat([df_res, df_labels], axis=1)
plt.figure(figsize=(12,8))
df_corr = (df_res.corr().abs())
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
plt.figure(figsize=(12,8))
df_corr = df_corr.loc[['primo','prim','poly'],['primo','prim','poly']]
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
#%% K mers spectrum
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import entropy
NMERS = [1,2,3]
df = pd.read_csv('./data/chip_B_favor.csv')
labels = ['primo','prim','poly']
np.random.RandomState(42)
df.index = df['seq']
m2 = 'CCACCCCAAAAAACCCCGTCAAAACCCCAAAAACCA'
df.loc[m2,'primo']
im = plt.imread(r'C:\Users\Ben\Desktop/Picture1.png')
x = list(range(1,14))
y = [1,
0,
0.4,
0.6,
0.47,
0.13,
0.2,
0.3,
0.5,
0.46,
0.5,
0.67,
0.8]
x= np.array(x)
y= np.array(y)
plt.imshow(im)
plt.scatter(x,y, c='red')
#for col in labels:
#df = df.drop(np.where(df[col] > np.percentile(df[col],95))[0],axis=0).reset_index(drop=True)
#df = df.drop(np.where(df[col] < np.percentile(df[col],5))[0],axis=0).reset_index(drop=True)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
plt.figure(figsize=(18,16))
for i, N in enumerate(NMERS):
letters = ['A','C','G','T']
    combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
#coutn mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_mer = np.sum(df_mer)
df_mer = df_mer/np.sum(df_mer)
df_mer = df_mer[(df_mer >= 0.01 )]
plt.subplot(len(NMERS),1,i+1)
plt.scatter(np.arange(len(df_mer)), df_mer, color=(['blue','red','green'])[i] )
plt.xticks(np.arange(len(df_mer)), df_mer.index, rotation=90)
#plt.legend([' Variance: {}'.format( np.var(df_mer)) ])
plt.title('{}-Mer'.format(N) )
plt.ylim([0, 0.3])
plt.ylabel('mer frequency')
#%% K-MEANS and Hierarchical clustering
"""
Dendogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
NLIST = [5]
labels = ['poly','prim','primo']
labels = ['primo']
ShowTextOnDendogram = True
showKM = True
showHC = False
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
df_backup = df.copy()
# =============================================================================
# Hierarchical Clustering
# =============================================================================
from scipy.cluster import hierarchy
if(showHC):
#WORKS FINE
X = df_backup.drop(labels,axis=1).copy()
X = X.iloc[:,:].reset_index(drop=True)
Z = hierarchy.linkage(X, method='ward')
Z = pd.DataFrame(Z)
botline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])),-2] * 1.05
topline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])) + 1, -2] * 0.95
fig = plt.figure(figsize=(4, 6))
dn = hierarchy.dendrogram(Z, p=7, truncate_mode='level', color_threshold=40, distance_sort=True)
plt.hlines([botline, topline], xmin=0, xmax=len(Z), ls='--', alpha = 0.9 )
plt.ylabel('Ward Distance')
disticks = np.unique(np.sqrt(Z.iloc[:,-2]).astype(int))
#plt.yticks( disticks**2 , disticks)
plt.xticks([])
plt.xlabel('')
Z = hierarchy.linkage(X, method='ward')
X[labels] = df_backup[labels].copy()
thr = 40
dists = [ 20, 40, 80, 120]
fntsze = 22
thr = 40
for i, thr in enumerate(dists):
Xg = X.copy()
Xg['bin'] = hierarchy.fcluster(Z, thr, criterion='distance', depth=5, R=None, monocrit=None)
Xres = Xg.groupby('bin').sum()
Xres[labels] = Xg.groupby('bin').median()[labels]
xcount = Xg.copy()
xcount['count'] = 1
xcount = xcount.groupby('bin').sum()['count']
xcnew = [xcount.iloc[0]/2]
for j in xcount.index[1:]:
xcnew += [np.sum(xcount[:j-1]) + xcount[j]/2]
xcount = pd.Series( xcnew )
xcount.index = xcount.index + 1
#plt.subplot(4,1, i+1 )
#plt.scatter(Xres.index, Xres[labels])
toKeep = [x for x in X.drop(labels, axis=1).columns if '36' not in x]
Xres = (Xres.loc[:,toKeep])
Xres.columns = [x[-1] for x in Xres.columns]
Xres = Xres.T
Xres = Xres.groupby(Xres.index).sum()
for col in Xres.columns:
Xres[col] = Xres[col] / np.sum(Xres[col])
Xres = Xres.T
row_idx = 1
for row_idx in Xres.index:
row = Xres.loc[row_idx,:]
print(
xcount.iloc[row_idx-1]
)
accumsize = 0
for dx, lett in enumerate(row.index):
x_rng = plt.gca().get_xlim()[1]
# =============================================================================
# # ADDING TEXT TO DENDOGRAM
# =============================================================================
if(ShowTextOnDendogram == True):
plt.text(x= xcount.iloc[row_idx-1]*x_rng/len(Xg) + accumsize,
y=thr, horizontalalignment='left',
s=lett, fontsize=np.max([fntsze*row[lett], 6]) ,
weight='normal', fontname='arial')
accumsize += np.max([fntsze*row[lett], 8]) + 36
#% TODO MAKE THIS PRETTY
from sklearn.metrics import silhouette_score
res_ss = []
xvec = [5]
for i in xvec:
X = df.copy().drop(['bin'], axis=1, errors='ignore')
X = X.drop(labels, axis=1)
tmp_ss = []
for j in range(1):
km = KMeans(i, random_state=j )
y = km.fit_predict(X)
ss = silhouette_score( X, y )
tmp_ss += [ss]
print('sil score => mean: {} | std: {}'.format(np.mean(tmp_ss), np.std(tmp_ss)) )
res_ss += [np.mean(tmp_ss)]
plt.figure()
plt.scatter(xvec,res_ss)
plt.xlabel('K-Value')
plt.ylabel('Sil Score')
plt.show()
if(showKM):
col = 'primo'
plt.figure(figsize=(6,4))
for i, Nbins in enumerate(NLIST):
df = df_backup.copy()
km = KMeans(Nbins, random_state=42 )
df['bin'] = km.fit_predict(df.drop(labels,axis=1))
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4,
4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
tdf = df.loc[:,['bin',col]]
#rep_d = {0:'A',1:'B',2:'C',3:'D',4:'E'}
rep_d = {0:2,1:3,2:0,3:1,4:4}
df['bin'] = df['bin'].replace(rep_d)
centers = list(np.array(centers)[list(rep_d.values())])
print('Mean Words:')
print(centers)
#rep_d = {'A':2,'B':3,'C':0,'D':1,'E':4}
#df['bin'] = df['bin'].replace(rep_d)
plt.subplot(len(NLIST),1,i+1)
sns.violinplot(x="bin", y=col, data=df, palette="Blues", cut=0)
plt.ylim([-0.2, 1.2])
plt.ylabel('Primase \nBinding Scores', fontsize=12)
plt.title('Scores Distribution by Cluster', fontsize=12)
"""
for tx, tcent in zip(np.arange(np.max(tdf['bin'])+1) , centers):
chunks, chunk_size = len(tcent), len(tcent)//6
stlist = [ tcent[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
tcent = '\n'.join(stlist)
t = plt.text(x=tx-0.5, y=0, s=tcent, fontsize=10, color='red', fontweight='normal', backgroundcolor='white')
t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white'))
plt.xlim([-1, Nbins-1 + 0.5])
"""
#plt.xticks( np.arange(np.max(tdf['bin'])+1)
#,centers , rotation=-90, fontsize=12)
plt.yticks( [0,0.25,0.5,0.75,1], fontsize=12 )
plt.tight_layout()
plt.savefig('./out/kmeans/forpaper_B_centroids_' + str(Nbins) + 'bins')
plt.show()
#plt.close()
#%% PCA
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
NMERS = [3]
df = | pd.read_csv('./data/chip_B_favor.csv') | pandas.read_csv |
import pickle
import os
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import multiprocessing
# from sklearn.utils.random import sample_without_replacement
# from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
# from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
# from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA
# from sklearn.gaussian_process import GaussianProcessClassifier
# from sklearn.svm import LinearSVR
# from sklearn.neural_network import MLPClassifier
# from sklearn.feature_selection import RFECV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import make_scorer
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
from framework.data_portals.data_portal import DataPortal
from pyESN import ESN
all_tickers = pd.read_csv('C:\\Users\\kohle\\Documents\\Machine Learning\\Echo State Networks\\Stock_Data\\list.csv')[
'A'].tolist()
pknum = 0
ticker_range = (pknum * 7000, (pknum + 1) * 7000)
ticker_range = (0, len(all_tickers))
delay_minutes = 0
tasks = ['new', 'continue', 'loop_new'] # choose from: ['new', 'predict_all', 'continue', 'combine', 'loop_new']
# tasks = ['continue']
# tasks = ['combine']
tasks = []
new_env = False # instruction to keep specified model_env (instead of loading old one, when available)
end_int = -1 # condition to set number of iterations to be run
model_env = {
'all_tickers': all_tickers,
'tickers': np.random.choice(all_tickers, 500, replace=False),
'n_res_list': [30, 30, 30, 30, 30, 30, 50, 80],
'sparsity_list': [0.5, 0.75, 0.8, 0.85, 0.9, 0.925, 0.95, 0.96],
'train_len': 4000,
'drop_len': 200,
'n_trees': 200,
'n_comp': 10,
'vol': False,
'individual': False,
# 'model_ui': '0145_SGD',
'model_ui': '0041_SGD',
'folder_path': 'models/SGD_hinge_loss'
}
class LinDetrend(object):
lin_trend = None
def fit(self, X, y, **fit_params):
self.lin_trend = np.polyfit(range(len(X)), X, 1)
return self
def transform(self, X):
return X - np.polyval(self.lin_trend, range(len(X))).reshape((1, len(X))).T
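# Hedged sketch (pipeline layout is an assumption, not shown in this file):
# LinDetrend follows the fit/transform convention, so it can sit ahead of a
# scaler, e.g.
#   prep = Pipeline([('detrend', LinDetrend()), ('scale', StandardScaler())])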
def individual_fit_results(tickers, model, prep, net, pca=None, new_fit=True, drop_len=200, train_len=4000,
test_len=200, vol=True):
"""
model is assumed to generate a 1,0 classification to either buy or sell
"""
gen = portal.iter_get_uids('daily_prices', 'default', tickers)
df = pd.DataFrame() # Dataframe with tickers and performance metrics
df1 = pd.DataFrame() # array of model coefficients
df2 = pd.DataFrame() # array of trading results
df3 = pd.DataFrame() # array of buy & hold results
df4 = pd.DataFrame() # array of predictions from model
i = 0
for data in gen:
print(i)
x_train, x_test = np.zeros((0, sum(model_env['n_res_list']) + 1)), \
np.zeros((0, sum(model_env['n_res_list']) + 1))
y_train, y_test, y_cv, y_tcv = [], [], [], []
w_train, w_test = [], []
log_vol = np.log10(np.array(data['volume'] + 1).reshape((len(data), 1)))
log_prices = np.log10(np.array(data['adjusted_close']).reshape((len(data), 1)))
if len(log_prices) > train_len + test_len:
prep.fit(log_prices[:train_len])
log_prices = prep.transform(log_prices)
if vol:
prep.fit(log_vol[:train_len])
log_vol = prep.transform(log_vol)
else:
log_vol = np.ones((len(data), 1))
states = net.get_states(log_vol, log_prices)
# if pca:
# states = pca.transform(states)
x_train = np.vstack((x_train, states[model_env['drop_len']:train_len]))
y_train += np.sign((np.sign(
log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[model_env['drop_len']:train_len,
0]) + 1) / 2).tolist()
w_train += np.abs(
log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[model_env['drop_len']:train_len,
0]).tolist()
y_cv += (log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[
model_env['drop_len']:train_len,
0]).tolist()
x_test = np.vstack((x_test, states[train_len:-1]))
y_test += np.sign(
(np.sign(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]) + 1) / 2).tolist()
w_test += np.abs(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]).tolist()
y_tcv += (log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]).tolist()
if pca:
states = pca.transform(states)
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
if new_fit:
model.fit(x_train, y_train, sample_weight=w_train)
acc = model.score(states[1:], np.sign((np.sign(log_prices[1:, 0] - log_prices[:-1, 0]) + 1) / 2).tolist())
pred = model.predict(states[drop_len:])
hold = np.array(np.log10(data['adjusted_close'])[drop_len:])
trading = np.hstack((hold[0], (hold[0] + ((2 * pred[:-1] - 1) * (hold[1:] - hold[:-1])).cumsum())))
all_hold_ret = hold[-1] - hold[0]
all_trade_ret = trading[-1] - trading[0]
all_inc_ret = all_trade_ret / abs(all_hold_ret) - 1
train_hold_ret = hold[train_len - drop_len] - hold[0]
train_trade_ret = trading[train_len - drop_len] - trading[0]
train_inc_ret = train_trade_ret / abs(train_hold_ret) - 1
test_hold_ret = hold[train_len + test_len - drop_len] - hold[train_len - drop_len]
test_trade_ret = trading[train_len + test_len - drop_len] - trading[train_len - drop_len]
test_inc_ret = test_trade_ret - test_hold_ret
if isinstance(df2, pd.DataFrame):
df2 = np.pad(trading[:train_len + test_len],
[0, train_len + test_len - len(trading[:train_len + test_len])])
df3 = np.pad(hold[:train_len + test_len],
[0, train_len + test_len - len(hold[:train_len + test_len])])
# df1 = model._get_coef() #Support Vector Classifier (SVC)
# df1 = model.feature_importances_ #Random Forest (RF)
df1 = model.coef_ # SGDClassifier (SGD)
df4 = np.pad(pred[:train_len + test_len],
[0, train_len + test_len - len(pred[:train_len + test_len])])
df = df.append(pd.DataFrame([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,
train_hold_ret, train_trade_ret, train_inc_ret,
test_hold_ret, test_trade_ret, test_inc_ret]],
columns=['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret',
'all_inc_ret', 'train_hold_ret', 'train_trade_ret',
'train_inc_ret', 'test_hold_ret', 'test_trade_ret',
'test_inc_ret']))
else:
df2 = np.vstack((df2, np.pad(trading[:train_len + test_len],
[0, train_len + test_len - len(trading[:train_len + test_len])])))
df3 = np.vstack((df3, np.pad(hold[:train_len + test_len],
[0, train_len + test_len - len(hold[:train_len + test_len])])))
df1 = np.vstack((df1, model.coef_))
# df1 = np.vstack((df1, model._get_coef()))
# df1 = np.vstack((df1, model.feature_importances_()))
df4 = np.vstack((df4, np.pad(pred[:train_len + test_len],
[0, train_len + test_len - len(pred[:train_len + test_len])])))
df = df.append(pd.DataFrame([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,
train_hold_ret, train_trade_ret, train_inc_ret,
test_hold_ret, test_trade_ret, test_inc_ret]],
columns=['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret',
'all_inc_ret', 'train_hold_ret', 'train_trade_ret',
'train_inc_ret', 'test_hold_ret', 'test_trade_ret',
'test_inc_ret']))
i += 1
df.reset_index(drop=True, inplace=True)
return df, df1, df2, df3, df4
def inspect_ticker(ticker, model, prep, net, pca=None, vol=None, drop_len=200):
data = portal.get('daily_prices', 'default', ticker)
pp = np.log10(np.array(data['adjusted_close']).reshape((len(data), 1)))
prep.fit(pp[:model_env['train_len']])
pp = prep.transform(pp)
if vol:
log_vol = np.log10(np.array(ticker['volume'] + 1).reshape((len(ticker), 1)))
prep.fit(log_vol[:model_env['train_len']])
log_vol = prep.transform(log_vol)
else:
log_vol = np.ones((len(data), 1))
states = net.get_states(log_vol, pp)
if pca:
states = pca.transform(states)
pred = model.predict(states[drop_len:])
# score = trading_score()
    hold = np.array(data['adjusted_close'])[drop_len:]
trading = hold[0] + ((2 * pred[:-1] - 1) * (hold[1:] - hold[:-1])).cumsum()
return hold, trading
def plot_ticker(ticker, model, prep, net, pca=None, vol=False):
hold, trading = inspect_ticker(ticker, model, prep, net, pca=pca, vol=vol)
plt.plot(hold, label=ticker)
plt.plot(trading, label=ticker + '_ESN')
plt.legend()
def generate_plots(tickers, model, prep, net):
for ticker in tickers:
print(ticker)
yield plot_ticker(ticker, model, prep, net)
def trading_score(y, y_pred):
return sum(y * np.sign(y_pred)) / sum(y * np.sign(y))
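# trading_score divides the return realised by trading sign(y_pred) on the true
# moves y by the perfect-foresight return sum(|y|); 1.0 means every direction
# was called correctly. Sketch with made-up values:
#   y = np.array([0.02, -0.01, 0.03]); y_pred = np.array([1.0, 1.0, 0.5])
#   trading_score(y, y_pred)  # = 0.04 / 0.06 ~ 0.667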
def combine_pickles(model_uis, path, keys=('out', 'coefs', 'trading', 'hold', 'pred')):
""" Combines dictionaries of arrays (saved as separate pickles) into in a single dictionary of arrays """
data_dict = {}
if isinstance(model_uis, str):
model_uis = [model_uis]
for model_ui in model_uis:
data_dict[model_ui] = dict(zip(keys, [None] * len(keys)))
for frame in keys:
with open(f'{path}/{model_ui}/{model_ui}_{frame}0.pkl', 'rb') as file:
data_dict[model_ui][frame] = pickle.load(file)
for frame in keys:
for i in range(1, pknum + 1):
with open(f'{path}/{model_ui}/{model_ui}_{frame}{i}.pkl', 'rb') as file:
df = pickle.load(file)
if isinstance(df, pd.DataFrame):
data_dict[model_ui][frame] = data_dict[model_ui][frame].append(df)
else:
data_dict[model_ui][frame] = np.vstack((data_dict[model_ui][frame], df))
return data_dict.copy()
def predict_all(model_env, ticker_range, all_tickers, pknum=0, new_env=True):
path = model_env["folder_path"]
with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_def.pkl', 'rb') as file:
model_def = pickle.load(file)
if not new_env:
with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_env.pkl', 'rb') as file:
model_env = pickle.load(file)
out, coefs, trading, hold, pred = | pd.DataFrame() | pandas.DataFrame |
'''
A scenario discovery oriented implementation of PRIM.
The implementation of prim provided here is data type aware, so
categorical variables will be handled appropriately. It also uses a
non-standard objective function in the peeling and pasting phase of the
algorithm. This algorithm looks at the increase in the mean divided
by the amount of data removed. So essentially, it uses something akin
to the first order derivative of the original objective function.
The implementation is designed for interactive use in combination with
the jupyter notebook.
'''
from __future__ import (absolute_import, print_function, division,
unicode_literals)
import copy
import math
from operator import itemgetter
import warnings
from ema_workbench.util import ema_logging
try:
import altair as alt
except ImportError:
alt = None
warnings.warn(("altair based interactive "
"inspection not available"), ImportWarning)
import matplotlib as mpl
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot # @UnresolvedImport
import numpy as np
import pandas as pd
import seaborn as sns
from .plotting_util import make_legend
from ..util import (EMAError, debug, INFO, temporary_filter,
get_module_logger)
from . import scenario_discovery_util as sdutil
# Created on 22 feb. 2013
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
__all__ = ['ABOVE', 'BELOW', 'setup_prim', 'Prim', 'PrimBox',
'PrimException', 'MultiBoxesPrim']
LENIENT2 = 'lenient2'
LENIENT1 = 'lenient1'
ORIGINAL = 'original'
ABOVE = 1
BELOW = -1
PRECISION = '.2f'
_logger = get_module_logger(__name__)
def get_quantile(data, quantile):
'''
quantile calculation modeled on the implementation used in sdtoolkit
Parameters
----------
data : nd array like
dataset for which quantile is needed
quantile : float
the desired quantile
'''
assert quantile > 0
assert quantile < 1
data = np.sort(data)
i = (len(data)-1)*quantile
index_lower = int(math.floor(i))
index_higher = int(math.ceil(i))
value = 0
if quantile > 0.5:
# upper
while (data[index_lower] == data[index_higher]) & (index_lower > 0):
index_lower -= 1
value = (data[index_lower]+data[index_higher])/2
else:
# lower
while (data[index_lower] == data[index_higher]) & \
(index_higher < len(data)-1):
index_higher += 1
value = (data[index_lower]+data[index_higher])/2
return value
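# Worked example (traced from the code above): get_quantile([1, 2, 3, 4, 5], 0.25)
# gives i = 1.0, both indices start at 1, index_higher is advanced to 2 because
# the two values there are equal, and the result is (2 + 3) / 2 = 2.5.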
def _pair_wise_scatter(x, y, boxlim, box_init, restricted_dims):
''' helper function for pair wise scatter plotting
#TODO the cases of interest should be in red rather than in blue
# this will give a nice visual insight into the quality of the box
# currently it is done through the face color being white or blue
# this is not very clear
Parameters
----------
x : DataFrame
the experiments
y : numpy array
the outcome of interest
box_lim : DataFrame
a boxlim
box_init : DataFrame
restricted_dims : collection of strings
list of uncertainties that define the boxlims
'''
x = x[restricted_dims]
data = x.copy()
# TODO:: have option to change
# diag to CDF, gives you effectively the
# regional sensitivity analysis results
categorical_columns = data.select_dtypes('category').columns.values
categorical_mappings = {}
for column in categorical_columns:
# reorder categorical data so we
# can capture them in a single column
categories_inbox = boxlim.loc[0, column]
categories_all = box_init.loc[0, column]
missing = categories_all - categories_inbox
categories = list(categories_inbox) + list(missing)
data[column] = data[column].cat.set_categories(categories)
# keep the mapping for updating ticklabels
categorical_mappings[column] = dict(enumerate(data[column].cat.categories))
# replace column with codes
data[column] = data[column].cat.codes
data['y'] = y # for testing
grid = sns.pairplot(data=data, hue='y', vars=x.columns.values)
cats = set(categorical_columns)
for row, ylabel in zip(grid.axes, grid.y_vars):
ylim = boxlim[ylabel]
if ylabel in cats:
y = -0.2
height = len(ylim[0])-0.6 # 2 * 0.2
else:
y = ylim[0]
height = ylim[1] - ylim[0]
for ax, xlabel in zip(row, grid.x_vars):
if ylabel == xlabel: continue
if xlabel in cats:
xlim = boxlim.loc[0, xlabel]
x = -0.2
width = len(xlim)-0.6 # 2 * 0.2
else:
xlim = boxlim[xlabel]
x = xlim[0]
width = xlim[1] - xlim[0]
xy = x, y
box = patches.Rectangle(xy, width, height, edgecolor='red',
facecolor='none', lw=3)
ax.add_patch(box)
# do the yticklabeling for categorical rows
for row, ylabel in zip(grid.axes, grid.y_vars):
if ylabel in cats:
ax = row[0]
labels = []
for entry in ax.get_yticklabels():
_, value = entry.get_position()
try:
label = categorical_mappings[ylabel][value]
except KeyError:
label = ''
labels.append(label)
ax.set_yticklabels(labels)
# do the xticklabeling for categorical columns
for ax, xlabel in zip(grid.axes[-1], grid.x_vars):
if xlabel in cats:
labels = []
locs = []
            mapping = categorical_mappings[xlabel]
for i in range(-1, len(mapping)+1):
locs.append(i)
try:
label = categorical_mappings[xlabel][i]
except KeyError:
label = ''
labels.append(label)
ax.set_xticks(locs)
ax.set_xticklabels(labels, rotation=90)
return grid
def setup_prim(results, classify, threshold, incl_unc=[], **kwargs):
"""Helper function for setting up the prim algorithm
Parameters
----------
results : tuple
tuple of DataFrame and dict with numpy arrays
the return from :meth:`perform_experiments`.
classify : str or callable
either a string denoting the outcome of interest to
use or a function.
threshold : double
the minimum score on the density of the last box
on the peeling trajectory. In case of a binary
classification, this should be between 0 and 1.
incl_unc : list of str, optional
list of uncertainties to include in prim analysis
kwargs : dict
valid keyword arguments for prim.Prim
Returns
-------
a Prim instance
Raises
------
PrimException
if data resulting from classify is not a 1-d array.
TypeError
if classify is not a string or a callable.
"""
x, y, mode = sdutil._setup(results, classify, incl_unc)
return Prim(x, y, threshold=threshold, mode=mode, **kwargs)
def calculate_qp(data, x, y, Hbox, Tbox, box_lim, initial_boxlim):
'''Helper function for calculating quasi p-values'''
if data.size==0:
return [-1, -1]
u = data.name
dtype = data.dtype
unlimited = initial_boxlim[u]
if np.issubdtype(dtype, np.number):
qp_values = []
for direction, (limit, unlimit) in enumerate(zip(data,
unlimited)):
if unlimit != limit:
temp_box = box_lim.copy()
temp_box.loc[direction, u] = unlimit
qp = sdutil._calculate_quasip(x, y, temp_box,
Hbox, Tbox)
else:
qp = -1
qp_values.append(qp)
else:
temp_box = box_lim.copy()
temp_box.loc[:, u] = unlimited
qp = sdutil._calculate_quasip(x, y, temp_box,
Hbox, Tbox)
qp_values = [qp, -1]
return qp_values
class CurEntry(object):
'''a descriptor for the current entry on the peeling and pasting
trajectory'''
def __init__(self, name):
self.name = name
def __get__(self, instance, _):
return instance.peeling_trajectory[self.name][instance._cur_box]
def __set__(self, instance, value):
raise PrimException("this property cannot be assigned to")
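# CurEntry makes e.g. `box.coverage` a read-only shortcut for
# `box.peeling_trajectory['coverage'][box._cur_box]`, so these statistics always
# refer to the currently selected box on the peeling trajectory.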
class PrimBox(object):
'''A class that holds information over a specific box
Attributes
----------
coverage : float
coverage of currently selected box
density : float
density of currently selected box
mean : float
mean of currently selected box
res_dim : int
number of restricted dimensions of currently selected box
mass : float
mass of currently selected box
peeling_trajectory : DataFrame
stats for each box in peeling trajectory
box_lims : list
list of box lims for each box in peeling trajectory
by default, the currently selected box is the last box on the
peeling trajectory, unless this is changed via
:meth:`PrimBox.select`.
'''
coverage = CurEntry('coverage')
density = CurEntry('density')
mean = CurEntry('mean')
res_dim = CurEntry('res_dim')
mass = CurEntry('mass')
_frozen = False
def __init__(self, prim, box_lims, indices):
'''init
Parameters
----------
prim : Prim instance
box_lims : DataFrame
indices : ndarray
'''
self.prim = prim
# peeling and pasting trajectory
colums = ['coverage', 'density', 'mean', 'res_dim',
'mass', 'id']
self.peeling_trajectory = pd.DataFrame(columns=colums)
self.box_lims = []
self.qp = []
self._resampled = []
self.yi_initial = indices[:]
columns = ['name', 'lower', 'upper', 'minimum', 'maximum',
'qp_lower', 'qp_upper', 'id']
self.boxes_quantitative = pd.DataFrame(columns=columns)
columns = ['item', 'name', 'n_items', 'x', 'id']
self.boxes_nominal = pd.DataFrame(columns=columns)
self._cur_box = -1
        # indices of data in box
self.update(box_lims, indices)
def __getattr__(self, name):
'''
used here to give box_lim same behaviour as coverage, density,
mean, res_dim, and mass. That is, it will return the box lim
associated with the currently selected box.
'''
if name == 'box_lim':
return self.box_lims[self._cur_box]
else:
raise AttributeError
def inspect(self, i=None, style='table', **kwargs):
'''Write the stats and box limits of the user specified box to
        standard out. If i is not provided, the last box will be
printed
Parameters
----------
i : int, optional
the index of the box, defaults to currently selected box
style : {'table', 'graph'}
the style of the visualization
additional kwargs are passed to the helper function that
generates the table or graph
'''
        if i is None:
i = self._cur_box
stats = self.peeling_trajectory.iloc[i].to_dict()
stats['restricted_dim'] = stats['res_dim']
qp_values = self.qp[i]
uncs = [(key, value) for key, value in qp_values.items()]
uncs.sort(key=itemgetter(1))
uncs = [uncs[0] for uncs in uncs]
if style == 'table':
return self._inspect_table(i, uncs, qp_values, **kwargs)
elif style == 'graph':
return self._inspect_graph(i, uncs, qp_values, **kwargs)
else:
raise ValueError("style must be one of graph or table")
def _inspect_table(self, i, uncs, qp_values):
'''Helper function for visualizing box statistics in
table form'''
# make the descriptive statistics for the box
print(self.peeling_trajectory.iloc[i])
print()
# make the box definition
columns = pd.MultiIndex.from_product([['box {}'.format(i)],
['min', 'max', 'qp values']])
box_lim = pd.DataFrame(np.zeros((len(uncs), 3)),
index=uncs,
columns=columns)
for unc in uncs:
values = self.box_lims[i][unc]
box_lim.loc[unc] = [values[0], values[1],
str(qp_values[unc])]
print(box_lim)
print()
def _inspect_graph(self, i, uncs, qp_values,
ticklabel_formatter="{} ({})",
boxlim_formatter="{: .2g}",
table_formatter='{:.3g}'):
'''Helper function for visualizing box statistics in
graph form'''
return sdutil.plot_box(self.box_lims[i], qp_values,
self.prim.box_init, uncs,
self.peeling_trajectory.loc[i, 'coverage'],
self.peeling_trajectory.loc[i, "density"],
ticklabel_formatter=ticklabel_formatter,
boxlim_formatter=boxlim_formatter,
table_formatter=table_formatter)
def inspect_tradeoff(self):
boxes = []
nominal_vars = []
quantitative_dims = set(self.prim.x_float_colums.tolist() +\
self.prim.x_int_columns.tolist())
nominal_dims = set(self.prim.x_nominal_columns)
box_zero = self.box_lims[0]
for i, (entry, qp) in enumerate(zip(self.box_lims, self.qp)):
qp = pd.DataFrame(qp, index=['qp_lower', 'qp_upper'])
dims = qp.columns.tolist()
quantitative_res_dim = [e for e in dims if e in quantitative_dims]
nominal_res_dims = [e for e in dims if e in nominal_dims]
# handle quantitative
df = entry
box = df[quantitative_res_dim]
box.index = ['x1', 'x2']
box = box.T
box['name'] = box.index
box['id'] = int(i)
box['minimum'] = box_zero[quantitative_res_dim].T.iloc[:, 0]
box['maximum'] = box_zero[quantitative_res_dim].T.iloc[:, 1]
box = box.join(qp.T)
boxes.append(box)
# handle nominal
for dim in nominal_res_dims:
# TODO:: qp values
items = df[nominal_res_dims].loc[0,:].values[0]
for j, item in enumerate(items):
entry = dict(name=dim, n_items=len(items)+1,
item=item, id=int(i),
x=j/len(items))
nominal_vars.append(entry)
boxes = | pd.concat(boxes) | pandas.concat |
"""
For working with metabolic models
"""
from __future__ import print_function, division, absolute_import
import os
import json
import pandas as pd
from ..globals import MODEL_DIR
from math import isnan
# ----------------------------------------
# Functions for aggregation (and/or)
# ----------------------------------------
def min_w_nan(vals):
"""
Min which propagates nan.
Normal python 'min' ignores nan.
"""
if any([isnan(x) for x in vals]):
return float('nan')
else:
return min(vals)
def mean_nan_zero(vals):
"""
Mean which treats 'nan' values as zeros
"""
vals = [0 if isnan(x) else x for x in vals]
return sum(vals) / len(vals)
def median_nan_zero(vals):
"""
Median which treats 'nan' values as zeros
"""
vals = [0 if isnan(x) else x for x in vals]
vals = sorted(vals)
if len(vals) % 2 == 1:
middle_i = int((len(vals)-1)/2)
return vals[middle_i]
else:
right_i = int(len(vals)/2)
left_i = right_i-1
return (vals[left_i] + vals[right_i])/2
def sum_wo_nan(vals):
"""
Max which ignores nan.
Normal python sum propagates nan.
"""
vals = [x for x in vals if not isnan(x)]
return sum(vals)
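# Illustrative behaviour of the aggregators above (nan = float('nan')):
#   min_w_nan([0.2, nan])            -> nan  (nan propagates)
#   mean_nan_zero([nan, 2.0, 4.0])   -> 2.0  (nan counted as 0)
#   median_nan_zero([nan, 2.0, 4.0]) -> 2.0  (sorted [0, 2, 4] -> middle value)
#   sum_wo_nan([0.2, nan, 0.3])      -> 0.5  (nan ignored)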
# ----------------------------------------
# Model class and related classes
# ----------------------------------------
class MetabolicModel(object):
def __init__(self, name):
self.name = name
self.reactions = {}
self.species = {}
self.compartments = {}
self.objectives = {}
self._maximum_flux = None
self.media = 'NoMedia'
def getReactions(self):
"""
Returns a list of reaction id's in the MetabolicModel
"""
return list(self.reactions.keys())
def getReactionBounds(self):
"""
Returns two dicts, each mapping reaction id -> bound
Returns:
lower_bounds
upper_bounds
"""
lower_bounds = {x.id: x.lower_bound for x in self.reactions.values()}
upper_bounds = {x.id: x.upper_bound for x in self.reactions.values()}
return lower_bounds, upper_bounds
def getReactionExpression(self, expression,
and_function='min',
or_function=sum_wo_nan):
# type: (pandas.Series) -> dict
"""
Evaluates a score for every reaction, using the expression data.
This is used for constraint/penalty generation.
If a score cannot be computed, NaN is used to fill
Returns a dict:
key: reaction id (str)
val: reaction score (float)
"""
# resolve the AND function
if and_function == 'min':
and_function = min_w_nan
elif and_function == 'mean':
and_function = mean_nan_zero
elif and_function == 'median':
and_function = median_nan_zero
else:
raise ValueError("Invalid value for and_function: " +
str(and_function))
score_dict = {}
for r_id, reaction in self.reactions.items():
score = reaction.eval_expression(
expression, and_function, or_function)
score_dict[r_id] = score
return score_dict
def limitExchangeReactions(self, limit):
"""
Limits the rate of metabolite exchange.
Applies the limit of `limit` where the limit is non-zero
Directionality is preserved
Exchange reactions are defined as any reaction in which
metabolites are produced from nothing
Does not return anything - modifies objects in place
"""
exchanges = {r_id: r for r_id, r in self.reactions.items()
if r.is_exchange}
# Make sure all exchange reactions have reactants but no products
# Instead of the opposite
for reaction in exchanges.values():
if len(reaction.products) > 0 and len(reaction.reactants) == 0:
# Metabolites created as products - limit forward flux
if reaction.upper_bound > limit:
reaction.upper_bound = limit
elif len(reaction.products) == 0 and len(reaction.reactants) > 0:
# Metabolites created as reactants - limit reverse flux
if reaction.lower_bound < -1*limit:
reaction.lower_bound = -1*limit
else:
raise Exception("Should not occur")
def getSMAT(self):
"""
Returns a sparse form of the s-matrix
result is a dict
key: metabolite (species) id
value: list of 2-tuples (reaction_id, coefficient)
coefficient is positive if metabolite is produced in the reaction,
negative if consumed
"""
s_mat = {}
for reaction_id, rr in self.reactions.items():
# reactants
for metabolite, coefficient in rr.reactants.items():
if metabolite not in s_mat:
s_mat[metabolite] = []
s_mat[metabolite].append((reaction_id, coefficient * -1))
# products
for metabolite, coefficient in rr.products.items():
if metabolite not in s_mat:
s_mat[metabolite] = []
s_mat[metabolite].append((reaction_id, coefficient))
return s_mat
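    # Hedged sketch of the returned structure (ids are made up): a reaction R1
    # consuming one unit of met_A and producing two units of met_B contributes
    #   {'met_A': [('R1', -1)], 'met_B': [('R1', 2)]}
    # with further (reaction_id, coefficient) tuples appended for every other
    # reaction that touches those metabolites.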
def make_unidirectional(self):
"""
Splits every reaction into a positive and negative counterpart
"""
uni_reactions = {}
for reaction in self.reactions.values():
if reaction.is_pos_unidirectional: # just ensure suffix and continue
if (not reaction.id.endswith('_pos')) and \
(not reaction.id.endswith('_neg')):
reaction.id = reaction.id + "_pos"
uni_reactions[reaction.id] = reaction
continue
# Copy the pos_reaction
pos_reaction = Reaction(from_reaction=reaction)
# Copy the negative reaction and Invert
neg_reaction = Reaction(from_reaction=reaction)
neg_reaction.invert()
# Adjust bounds for both
# Examples
# Positive: Original -> Clipped
# 0:10 -> 0:10
# -10:0 -> 0:0
# 5:7 -> 5:7
# -9:-5 -> 0:0
#
# Negative: Original -> Flipped -> Clipped
# 0:10 -> -10:0 -> 0:0
# -10:0 -> 0:10 -> 0:10
# 5:7 -> -7:-5 -> 0:0
# -9:-5 -> 5:9 -> 5:9
if pos_reaction.upper_bound < 0:
pos_reaction.upper_bound = 0
if pos_reaction.lower_bound < 0:
pos_reaction.lower_bound = 0
if neg_reaction.upper_bound < 0:
neg_reaction.upper_bound = 0
if neg_reaction.lower_bound < 0:
neg_reaction.lower_bound = 0
pos_reaction.id = pos_reaction.id + "_pos"
neg_reaction.id = neg_reaction.id + "_neg"
# Only add reactions if they can carry flux
if pos_reaction.upper_bound > 0:
neg_reaction.reverse_reaction = pos_reaction
uni_reactions[pos_reaction.id] = pos_reaction
if neg_reaction.upper_bound > 0:
pos_reaction.reverse_reaction = neg_reaction
uni_reactions[neg_reaction.id] = neg_reaction
self.reactions = uni_reactions
def load_media(self, media_name):
"""
Loads information in the media file and uses it to
modify exchange reaction bounds
Media files are stored in the model's directory under
the `media` folder with names `<media_name>.json`
Media file contains a JSON dict with keys corresponding to
reaction IDs and values corresponding to reaction
upper-bounds
"""
media_file = media_name + '.json'
media_file = os.path.join(MODEL_DIR, self.name, 'media', media_file)
with open(media_file) as fin:
media = json.load(fin)
for rid, ub in media.items():
self.reactions[rid].upper_bound = ub
self.media = media_name
def remove_isoform_summing(self):
"""
Removes instances where two isoforms of the same gene are summed/OR'd together
"""
for reaction in self.reactions.values():
if reaction.gene_associations:
reaction.gene_associations.remove_isoform_summing()
def _calc_max_flux(self):
"""
Determines the max (absolute) flux of the model
"""
max_flux = 0
for reaction in self.reactions.values():
max_flux = max(abs(reaction.lower_bound),
abs(reaction.upper_bound),
max_flux)
self._maximum_flux = max_flux
@property
def maximum_flux(self):
if self._maximum_flux is None:
self._calc_max_flux()
return self._maximum_flux
@property
def reaction_meta(self):
out = pd.DataFrame()
rows = [ | pd.Series(x.meta, name=x.id) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 7 15:19:27 2020
utilities
@author: Merten
"""
import pandas as pd
import numpy as np
import os
import scipy.interpolate as scpinter
from matplotlib import pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import tensorflow as tf
"""
INPUT DATA:
- absorption curves
- 3079 measurements, 1960 frequency points
- smoothened (even though the 'outliers' are no true outliers)
"""
def load_data(n_sampling):
""" load the raw measurement data and preprocess.
n_sampling: number of frequency points requested by the user. 1960 for the original 1Hz sampling
"""
"""
INPUT and TARGET DATA
- measurements of alpha, sampled at 1Hz within 270-2229 Hz range
- data are not noisy, and there are no true outliers. All obvious outliers
are in fact physical effects that are not yet understood. However, we will
smoothen the data for the purpose of this work.
Furthermore, we'll downsample the data to reduce the required complexity.
The number of sampling points is given by <n_sampling>.
"""
# --- load the absorption measurements (input features)
alpha = np.array(pd.read_csv(os.path.realpath('.') + '\\' + 'alphas.csv', header=None))
# corresponding frequency vector
f_min = 270 # minimum frequency
f_max = 2229 # maximum frequency
f = np.linspace(start=f_min, stop=f_max, num=alpha.shape[1]) # 1Hz sampling
# --- load the factors (target values)
targets = np.array(pd.read_csv(os.path.realpath('.') + '\\' + 'targets.csv', header=None))
# create a pandas data frame
factor_names = ['30mm', '40mm', '50mm', '80mm', # specimen thickness
'89mm', '90mm', '91mm', # specimen diameter
'HZD', 'MES', 'SLF', 'SLH', 'SSF', 'SSH', 'WSS', # cutting technology
'plunger_fixed', 'plunger_moved', # mounting
'computer', 'person1', 'person2', 'person3', # operator
'1', '2', '3', # sample index (obsolete)
'x', 'y', 'z'] # measurement index (obsolete)
factors = pd.DataFrame(targets, columns=factor_names)
    # we need neither the probe number nor the measurement number: drop them
factors = factors.drop(['1', '2', '3', 'x', 'y', 'z'], axis=1)
print('number of factors: '+str(np.array(factors).shape[1]))
# -- filtering
    # there are some absorption curves that are completely negative. We'll remove them
    mins = np.mean(alpha, axis=1)  # mean of each curve; a negative mean flags an all-negative curve
# remove the all-negative samples
alpha = alpha[mins>0,:]
factors = factors[:][mins>0]
print('removed ' + str(np.sum(mins<0)) + ' all-negative samples')
# replace all negative entries with small value (0.01)
    alpha_pos = np.where(alpha < 0, 0.01, alpha)
    # now fill in the remaining dips using a rolling median followed by a rolling max
alpha_smooth_med = np.array(pd.DataFrame(alpha_pos).rolling(window=25, center=True, min_periods=1, axis=1).median())
alpha_smooth_max = np.array(pd.DataFrame(alpha_smooth_med).rolling(window=50, center=True, min_periods=1, axis=1).max())
# --- downsampling
m = int(len(f)/n_sampling)
f_coarse = f[::m] # pick each mth element
alpha_coarse = alpha_smooth_max[:, ::m] # pick each mth element
n_observations = alpha_coarse.shape[0]
n_inputs = len(f_coarse)
print('number of measurements: ' + str(n_observations))
print('number of frequency points: ' + str(n_inputs))
return alpha_coarse, factors, factor_names, f_coarse
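# Minimal usage sketch for the loader above (assumes 'alphas.csv' and 'targets.csv'
# are present in the working directory, as load_data expects):
#   alpha, factors, factor_names, f = load_data(n_sampling=392)
#   print(alpha.shape, factors.shape)  # (n_measurements, 392), (n_measurements, n_factors)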
def prepare_data_univariate(X, factors, quant, cv_flag):
""" Do the data splitting.
inputs:
- X: absorption values
- factors: targets
- quant: current factor dimension
    - cv_flag: whether to do cross-validation splitting or not
"""
"""
1. Select one of the target values
quant:
- 'thickness'
- 'diameter'
- 'cutting_tech'
- 'operator'
- 'mounting'
"""
# extract the one-hot encoded target values from the factors data frame
if quant == 'thickness':
class_names = ['30mm', '40mm', '50mm', '80mm']
elif quant == 'diameter':
class_names = ['89mm', '90mm', '91mm']
elif quant == 'cutting_tech':
class_names = ['HZD', 'MES', 'SLF', 'SLH', 'SSF', 'SSH', 'WSS']
elif quant == 'operator':
class_names = ['computer', 'person1', 'person2', 'person3']
elif quant == 'mounting':
class_names = ['plunger_fixed', 'plunger_moved']
else:
print('ERROR, wrong quantity chosen!')
# get the correct factors
y = np.array(factors[class_names])
"""
2. Shuffle the data
    the data comes in a highly structured order, so we shuffle it first
"""
X, y = shuffle(X, y, random_state=1)
"""
3. cross-validation splitting (if requested)
"""
if cv_flag:
        # perform a k=5-fold cross-validation split (KFold, not stratified)
skf = KFold(n_splits=5)
skf.get_n_splits(X, y)
X_train = []
X_test = []
y_train = []
y_test = []
for train_index, test_index in skf.split(X, y):
# print(test_index)
X_train_temp, X_test_temp = X[train_index], X[test_index]
y_train_temp, y_test_temp = y[train_index], y[test_index]
X_train.append(X_train_temp)
X_test.append(X_test_temp)
y_train.append(y_train_temp)
y_test.append(y_test_temp)
else:
# do a stratified split (75-25)
X_train, X_test, y_train, y_test = train_test_split(X, y,
stratify=y,
test_size=0.25)
return X_train, X_test, y_train, y_test, class_names
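# Sketch of how the splitter above might be called (the factor name is illustrative):
#   # single hold-out split
#   X_tr, X_te, y_tr, y_te, names = prepare_data_univariate(alpha, factors, 'thickness', cv_flag=False)
#   # 5-fold CV: each returned object is then a list with one entry per fold
#   X_tr, X_te, y_tr, y_te, names = prepare_data_univariate(alpha, factors, 'thickness', cv_flag=True)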
def get_univariate_targets(y, quant):
""" get the correct data labels.
y: output values (one-hot encoded)
quant: factor (as string), see below
"""
# extract the one-hot encoded target values from the factors data frame
if quant == 'thickness':
class_names = ['30mm', '40mm', '50mm', '80mm']
elif quant == 'diameter':
class_names = ['89mm', '90mm', '91mm']
elif quant == 'cutting_tech':
class_names = ['HZD', 'MES', 'SLF', 'SLH', 'SSF', 'SSH', 'WSS']
elif quant == 'operator':
class_names = ['computer', 'person1', 'person2', 'person3']
elif quant == 'mounting':
class_names = ['plunger_fixed', 'plunger_moved']
else:
print('ERROR, wrong quantity chosen!')
# get the correct one-hot encoded target values
if type(y) is list:
for idx,y_temp in enumerate(y):
y[idx] = y_temp[class_names]
else:
y = y[class_names]
return y, class_names
def build_model(model_name, n_features, n_outputs, multilabel_flag):
"""
Build a classification model.
- activation: ReLU except for last layer
- activation (output layer):
- sigmoid (multi-label),
        - softmax (single label, multiclass). The outputs will add up to 1
for the MULTICLASS + MULTILABEL setting:
choose sigmoid activation + binary_crossentropy
        do NOT use softmax (the sum of the outputs would be forced to one)
Parameters
----------
    model_name : str
        architecture to build, either 'baseline' or 'deepmlp'.
    n_features : int
        number of input features (frequency points per curve).
    n_outputs : int
        number of output classes.
    multilabel_flag : bool
        if True, a sigmoid output layer is used; otherwise softmax.
    Returns
    -------
    model : tf.keras.Sequential
        the assembled (uncompiled) Keras model.
"""
if multilabel_flag:
last_act = 'sigmoid'
else:
last_act = 'softmax'
if model_name == 'baseline':
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(n_features, input_shape=(n_features,), activation='relu', kernel_initializer='uniform'))
model.add(tf.keras.layers.Dense(int(n_features/2), activation='relu'))
model.add(tf.keras.layers.Dense(50, activation='relu'))
model.add(tf.keras.layers.Dense(25, activation='relu'))
model.add(tf.keras.layers.Dense(n_outputs, activation=last_act))
model.summary()
elif model_name == 'deepmlp':
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(n_features, input_shape=(n_features,), activation='relu', kernel_initializer='uniform'))
model.add(tf.keras.layers.Dropout(0.2))
if n_features == 1960:
model.add(tf.keras.layers.Dense(int(n_features/2), activation='relu')) # 980
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/4), activation='relu')) # 490
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/8), activation='relu')) # 245
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/16), activation='relu')) # 123
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/32), activation='relu')) # 62
model.add(tf.keras.layers.Dropout(0.2))
elif n_features == 980:
model.add(tf.keras.layers.Dense(int(n_features/2), activation='relu')) # 490
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/4), activation='relu')) # 245
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/8), activation='relu')) # 123
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/16), activation='relu')) # 62
model.add(tf.keras.layers.Dropout(0.2))
elif n_features == 392:
model.add(tf.keras.layers.Dense(int(n_features/2), activation='relu')) # 196
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/4), activation='relu')) # 98
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/16), activation='relu')) # 49
model.add(tf.keras.layers.Dropout(0.2))
elif n_features == 196:
model.add(tf.keras.layers.Dense(int(n_features/2), activation='relu')) # 98
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(int(n_features/16), activation='relu')) # 49
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(25, activation='relu'))
model.add(tf.keras.layers.Dense(n_outputs, activation=last_act))
model.summary()
return model
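# Hedged example of assembling a classifier with the builder above (shapes are
# illustrative; multilabel_flag=False selects the softmax output layer):
#   clf = build_model('baseline', n_features=392, n_outputs=4, multilabel_flag=False)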
def train_evaluate_model(model, epochs, batch_size, X_train, X_test, y_train, y_test):
"""
Train a given model.
As we have only classification tasks (binary, or multiclass), we stick to
the following settings:
        - loss function: binary or categorical cross-entropy, chosen from the number of outputs
- optimizer: adam
- metric: accuracy (the data set is balanced, so acc is ok)
Parameters
----------
model : Keras (TF) model
keras model compiled using the <build_model> function
epochs : int
number of epochs
batch_size : int
        batch size. Increase it to smooth out the training behavior
X_train : np array
input data, training set
X_test : np array
input data, test set
y_train : np array
output data, training set
y_test : np array
output data, test set
Returns
-------
model : keras (TF) model
trained model instance
    history : tf.keras.callbacks.History
model training history
test_acc : float
test set accuracy
"""
# we will have to switch the loss function for the binary / multiclass setting
n_output = y_train.shape[1]
if n_output == 2:
loss_fun = 'binary_crossentropy'
elif n_output > 2:
loss_fun = 'categorical_crossentropy'
# compile model
model.compile(loss=loss_fun, optimizer='Adam', metrics=['accuracy'])
# fit model
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), \
epochs=epochs, batch_size=batch_size, verbose=0)
# evaluate the model
_, train_acc = model.evaluate(X_train, y_train, verbose=0)
_, test_acc = model.evaluate(X_test, y_test, verbose=0)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
# # plot loss during training
#plt.figure(num=None, figsize=(16, 6), dpi=100, facecolor='w')
#plt.subplot(121)
#plt.ylabel('Loss')
#plt.plot(history.history['loss'], label='train')
#plt.plot(history.history['val_loss'], label='test')
#plt.legend()
# plot accuracy during training
#plt.subplot(122)
#plt.ylabel('Accuracy')
#plt.xlabel('epochs')
#plt.plot(history.history['accuracy'], label='train')
#plt.plot(history.history['val_accuracy'], label='test')
#plt.legend()
#plt.savefig('temp_training_history.png')
#plt.show()
return model, history, test_acc
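# Putting the two helpers together (a sketch, not part of the original pipeline;
# epochs and batch_size are placeholders):
#   clf = build_model('deepmlp', n_features=X_train.shape[1], n_outputs=y_train.shape[1],
#                     multilabel_flag=False)
#   clf, hist, acc = train_evaluate_model(clf, epochs=100, batch_size=32,
#                                         X_train=X_train, X_test=X_test,
#                                         y_train=y_train, y_test=y_test)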
def get_high_confidence_predictions(y_gt, y_pred, X, conf):
""" for a given data set, keep only those samples for which we achieved acceptable confidence levels
"""
dp = np.sum(y_pred*y_gt, axis=1)
print(dp.shape)
X_conf = X[dp > conf,:]
y_conf = y_gt[dp > conf,:]
print('Confidence >' + str(conf) + ' filtering. Keeping ' + str(X_conf.shape[0]) + ' samples out of ' + str(X.shape[0]))
return X_conf, y_conf
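# Why the row-wise product works: with one-hot ground truth, np.sum(y_pred * y_gt, axis=1)
# is simply the predicted probability of the true class. Illustrative numbers:
#   y_gt   = [0,   1,    0   ]
#   y_pred = [0.1, 0.85, 0.05]  ->  dp = 0.85, so the sample is kept whenever conf < 0.85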
def prepare_data_multivariate(X, factors, cv_flag):
# get the target variables column names
y_col_names = factors.columns
"""
1. Shuffle the data
    the data comes in a highly structured order, so we shuffle it first
"""
y = np.array(factors)
X, y = shuffle(X, y, random_state=0)
"""
2. cross-validation splitting (if requested)
"""
if cv_flag:
        # perform a k=5-fold cross-validation split (KFold, not stratified)
skf = KFold(n_splits=5)
skf.get_n_splits(X, y)
X_train = []
X_test = []
y_train = []
y_test = []
for train_index, test_index in skf.split(X, y):
# print(test_index)
X_train_temp, X_test_temp = X[train_index], X[test_index]
y_train_temp, y_test_temp = y[train_index], y[test_index]
# make the labels a pd data frame again
y_train_temp = pd.DataFrame(y_train_temp, columns=y_col_names)
y_test_temp = pd.DataFrame(y_test_temp, columns=y_col_names)
# append to list of CV data sets
X_train.append(X_train_temp)
X_test.append(X_test_temp)
y_train.append(y_train_temp)
y_test.append(y_test_temp)
else:
# do a stratified split (75-25)
X_train, X_test, y_train, y_test = train_test_split(X, y,
stratify=y,
test_size=0.25)
y_train = pd.DataFrame(y_train, columns=y_col_names)
y_test = | pd.DataFrame(y_test, columns=y_col_names) | pandas.DataFrame |
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
import xgboost as xgb # use xgboost=1.0.2
import pickle
def read_excel(filePath):
df = pd.read_excel(filePath, sheet_name='Sheet1_user_dt')
df_1 = df.dropna()
drop_colume = ['email',
'sn',
'username',
'reg_time',
'birthday',
'reg_type',
'reg_state',
'country',
'city',
'area',
'num_country',
'num_city',
'num_area',
'sell_type',
'sell_reason',
'sell_target']
df_2 = df_1.drop(drop_colume, axis=1)
return df_2
def split_data(df):
train, test = train_test_split(df, test_size=0.3)
target_factor = 'sc_day_month'
    drop_factor = ['used_day_month', 'used_freq_day',
'sc_times', 'no_sc_times', 'sc_days', 'no_sc_days', 'sc_freq_day']
train_1 = train.drop(drop_factor, axis=1)
test_1 = test.drop(drop_factor, axis=1)
y_train = train_1[target_factor]
x_train = train_1.drop(target_factor, axis=1)
y_test = test_1[target_factor]
x_test = test_1.drop(target_factor, axis=1)
return x_train, y_train, x_test, y_test
def xgbmodel_fit(X_train, Y_train, learning_rate, n_estimators, max_depth, gamma):
xgb_model = xgb.XGBClassifier(learning_rate=learning_rate, n_estimators=n_estimators, max_depth=max_depth,
gamma=gamma, subsample=0.6, objective='binary:logistic', nthread=4, scale_pos_weight=1)
xgb_model.fit(X_train, Y_train, eval_metric='auc')
return xgb_model
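# Rough end-to-end sketch for the helpers above (the file name and hyper-parameters are
# placeholders, not values from the original project):
#   df = read_excel('user_data.xlsx')
#   x_train, y_train, x_test, y_test = split_data(df)
#   model = xgbmodel_fit(x_train, y_train, learning_rate=0.1, n_estimators=200,
#                        max_depth=5, gamma=0)
#   print(model.score(x_test, y_test))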
'''
def saveModel(model_name, model_fit):
with open(str(model_name)+'.pickle', 'wb') as model:
pickle.dump(model_fit, model)
def loadModel(model_name):
with open(model_name+'.pickle', 'rb') as model_file:
model = pickle.load(model_file)
return model
'''
def store_excel(good_learning_rate_default, good_n_estimators_default, good_max_depth_default, good_gamma_default):
headers = ['learning_rate', 'n_estimators', 'max_depth', 'gamma']
data = {
headers[0]: [good_learning_rate_default],
headers[1]: [good_n_estimators_default],
headers[2]: [good_max_depth_default],
headers[3]: [good_gamma_default],
}
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : pandas test
import numpy as np
import pandas as pd
# A DataFrame groups data into column form and represents it like a table.
# Series can be combined to build a 2-dimensional DataFrame structure.
dic1 = {"name": "jane", "fruit": "lemon", "price": 1000}
dic2 = {"name": "bill", "fruit": "orange", "price": 2000}
dic3 = {"name": "tom", "fruit": "apple", "price": 3000}
dic4 = {"name": "alice", "fruit": "banana", "price": 4000}
s1 = pd.Series(dic1)
s2 = | pd.Series(dic2) | pandas.Series |
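# A possible continuation (sketch): the remaining dicts can be wrapped the same way and
# the Series stacked into a 2-D DataFrame, e.g.
#   s3 = pd.Series(dic3)
#   s4 = pd.Series(dic4)
#   df = pd.DataFrame([s1, s2, s3, s4])  # 4 rows x 3 columns: name, fruit, price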
import argparse
import copy
import itertools
import os
import shutil
import time
import warnings
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.multiprocessing as mp
from src.utils.SREA_utils import single_experiment_SREA
from src.utils.global_var import OUTPATH
from src.utils.saver import Saver
from src.utils.utils import str2bool, map_abg_main, map_losstype, check_ziplen, remove_duplicates
######################################################################################################
warnings.filterwarnings("ignore")
torch.backends.cudnn.benchmark = True
columns = shutil.get_terminal_size().columns
| pd.set_option('display.max_rows', None) | pandas.set_option |
from DataHandler.DataHandler import DataHandler
import pandas
from Event.EventQueue import EVENT_QUEUE
from Event.Event import Event
import Information.Info as Info
# DEFAULT_COLUMN: the default set of columns to read from the data file
DEFAULT_COLUMN = ["Symbol", "Date", "Time",
"Open", "High", "Low", "Close", "Volume", "Turnover"]
def series_to_bar(row: pandas.Series) -> Event:
"""
    series_to_bar: generate a Bar event from a given row of data (pandas.Series)
    @row(pandas.Series): the given row of data (pandas.Series)
    @return(Event): the generated Bar event
"""
return Event(type_="Bar", datetime_=row["UpdateDateTime"],
info_=Info.BarInfo(symbol_=row["Symbol"], datetime_=row["UpdateDateTime"],
open_=float(row["Open"]), high_=float(row["High"]),
low_=float(row["Low"]), close_=float(row["Close"]),
volume_=0, turnover_=0))
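# Hedged usage note: given a quotes DataFrame whose rows carry the fields used above
# (Symbol, UpdateDateTime, Open/High/Low/Close), a Bar event could be built per row, e.g.
#   bar_event = series_to_bar(quotes.iloc[0])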
class MADataHandler(DataHandler):
"""
    MADataHandler(DataHandler): input-data handling module for the moving-average strategy
"""
__slots__ = ["symbol", "event_queue", "dataframe"]
def __init__(self):
self.dataframe = | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 21:05:00 2020
Revised on Thur Mar 18 16:04:00 2021
@author: Starlitnightly
New Version 1.2.3
"""
import itertools
import numpy as np
import pandas as pd
from upsetplot import from_memberships
from upsetplot import plot
def FindERG(data, depth=2, sort_num=20, verbose=False, figure=False):
'''
Find out endogenous reference gene
Parameters
----------
    data: pandas.DataFrame
DataFrame of data points with each entry in the form:['gene_id','sample1',...]
depth:int
        Accuracy of the endogenous reference gene search; must be larger than 2.
        The larger the number, the fewer genes are screened out and the higher the accuracy.
sort_num:int
        The size of the endogenous reference gene filter
When the sample is large, it is recommended to increase the value
verbose: bool
Make the function noisy, writing times and results.
Returns
-------
result:list
        a list of endogenous reference genes
'''
lp=[]
if verbose:
import time,datetime
start = time.time()
if depth==1:
        print('the depth must be larger than 2')
return
if len(data.columns)<=2:
        print('the number of samples must be larger than 2')
return
if depth>(len(data.columns)):
        print('depth is larger than the number of samples')
return
count=0
result=[]#result
bucket_size = 1000
for i in itertools.combinations(data.columns[0:depth], 2):
if verbose:
start = time.time()
count=count+1
test=data.replace(0,np.nan).dropna()
last_std= | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from trackintel.geogr.distances import check_gdf_planar, calculate_haversine_length
def calculate_modal_split(tpls_in, freq=None, metric="count", per_user=False, norm=False):
"""Calculate the modal split of triplegs
Parameters
----------
tpls_in : GeoDataFrame (as trackintel triplegs)
triplegs require the column `mode`.
freq : str
frequency string passed on as `freq` keyword to the pandas.Grouper class. If `freq=None` the modal split is
calculated on all data. A list of possible
values can be found `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset
-aliases>`_.
metric : {'count', 'distance', 'duration'}
Aggregation used to represent the modal split. 'distance' returns in the same unit as the crs. 'duration'
returns values in seconds.
per_user : bool, default: False
If True the modal split is calculated per user
norm : bool, default: False
If True every row of the modal split is normalized to 1
Returns
-------
modal_split : DataFrame
The modal split represented as pandas Dataframe with (optionally) a multi-index. The index can have the
levels: `('user_id', 'timestamp')` and every mode as a column.
Notes
------
`freq='W-MON'` is used for a weekly aggregation that starts on mondays.
If `freq=None` and `per_user=False` are passed the modal split collapses to a single column.
The modal split can be visualized using :func:`trackintel.visualization.modal_split.plot_modal_split`
Examples
--------
>>> triplegs.calculate_modal_split()
>>> tripleg.calculate_modal_split(freq='W-MON', metric='distance')
"""
tpls = tpls_in.copy()
# precalculate distance and duration if required
if metric == "distance":
if_planer_crs = check_gdf_planar(tpls)
if not if_planer_crs:
tpls["distance"] = calculate_haversine_length(tpls)
else:
tpls["distance"] = tpls.length
elif metric == "duration":
tpls["duration"] = tpls["finished_at"] - tpls["started_at"]
# create grouper
if freq is None:
if per_user:
tpls_grouper = tpls.groupby(["user_id", "mode"])
else:
tpls_grouper = tpls.groupby(["mode"])
else:
tpls.set_index("started_at", inplace=True)
tpls.index.name = "timestamp"
if per_user:
tpls_grouper = tpls.groupby(["user_id", "mode", pd.Grouper(freq=freq)])
else:
tpls_grouper = tpls.groupby(["mode", | pd.Grouper(freq=freq) | pandas.Grouper |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
    # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
    # use an artificial conversion to map the key as integers to the labels
    # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
        # repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
        # lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
        # loc should not fall back
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
        # at should not fall back
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_loc_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
# this does not work, ie column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because the index is non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import nltk as nl
from difflib import SequenceMatcher
# %% BEST MATCH STRING
def findBestMatchingString(inputTable,compareStringList,old_label_column,new_label_column='MATCHED_STRING', matchingTreshold = 0.6, printMatchingString=True):
    # For each row of the inputTable dataframe, look in column old_label_column
    # for the best-matching string in compareStringList;
    # the result is stored in the column new_label_column.
    # Accuracy is limited by the matching threshold matchingTreshold (it should not be below 0.6).
    # make sure the column new_label_column exists
if new_label_column not in inputTable.columns:
inputTable[new_label_column]=np.nan
#force mapping destination
D_notMapped = list(set(inputTable[inputTable[new_label_column].isna()][old_label_column]))
for destinazione in D_notMapped:
#destinazione=D_notMapped[0]
conteggio = [SequenceMatcher(None, destinazione, stringa).ratio() for stringa in compareStringList]
checkBestfound = [count>matchingTreshold for count in conteggio]
if any(checkBestfound):
bestMatch = compareStringList[np.argmax(conteggio)]
if printMatchingString:
print(f"notMapped: {destinazione}, bestMatch: {bestMatch}")
            inputTable.loc[inputTable[old_label_column] == destinazione, new_label_column] = bestMatch
return inputTable
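# %% USAGE SKETCH (illustrative addition, not part of the original module)
# Minimal, hypothetical example of calling findBestMatchingString; the demo
# DataFrame and the reference list below are made-up values for illustration.
demo_table = pd.DataFrame({'city_raw': ['Milann', 'Torin', 'Rome']})
demo_table = findBestMatchingString(demo_table,
                                    compareStringList=['Milan', 'Turin', 'Rome'],
                                    old_label_column='city_raw')
# demo_table['MATCHED_STRING'] now holds the best match found for each raw value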
# %% BAG OF WORDS MODEL
def getFrequencyKeyword(inputTable,minFrequency,weightColumn,maxLenTable=[]):
#take as input a dataframe with:
# - a column WORDTAG having keywords separated by ;
# - a column CONTEGGIO with the weight of each row of the dataframe
#return a dataframe with a single word and its frequency among all the table
dictionary= | pd.DataFrame(columns=['word','frequency']) | pandas.DataFrame |
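# Illustrative sketch (an addition, not the truncated original implementation):
# one way to build the word/frequency table described in the comments above,
# assuming a ';'-separated WORDTAG column and a numeric weight column.
def sketch_keyword_frequency(input_table, weight_column, min_frequency=1):
    rows = []
    for _, row in input_table.iterrows():
        for word in str(row['WORDTAG']).split(';'):
            word = word.strip()
            if word:
                rows.append({'word': word, 'frequency': row[weight_column]})
    if not rows:
        return pd.DataFrame(columns=['word', 'frequency'])
    frequencies = (pd.DataFrame(rows)
                   .groupby('word', as_index=False)['frequency']
                   .sum())
    return frequencies[frequencies['frequency'] >= min_frequency]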
import zimp_clf_client
import mlflow
import pandas as pd
import os
import time
import logging
from zimp_clf_client.rest import ApiException
from experiment.config import Config
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score, precision_score, recall_score
def get_or_create_mlflow_experiment(experiment_name):
existing_exp = mlflow.get_experiment_by_name(experiment_name)
if existing_exp is not None:
return existing_exp
exp_id = mlflow.create_experiment(experiment_name)
return mlflow.get_experiment(exp_id)
class Experiment:
def __init__(self, config: Config):
self.config = config
# init classification API
configuration = zimp_clf_client.Configuration()
configuration.host = config.classification_service_url
api_client = zimp_clf_client.ApiClient(configuration=configuration)
api_client.rest_client.pool_manager.connection_pool_kw['retries'] = 10 # in case api is unstable
self.train_api = zimp_clf_client.TrainingApi(api_client)
self.predict_api = zimp_clf_client.PredictionApi(api_client)
self.download_api = zimp_clf_client.DownloadApi(api_client)
# init mlflow API
mlflow.set_tracking_uri(config.mlflow_url)
self.mlflow_experiment = get_or_create_mlflow_experiment(config.experiment_name)
# resource paths
self.train_path = os.path.join('resources', config.dataset, 'train.csv')
self.test_path = os.path.join('resources', config.dataset, 'test.csv')
def run(self):
with mlflow.start_run(experiment_id=self.mlflow_experiment.experiment_id,
run_name=self.config.run_name) as mlflow_run:
mlflow.log_param('model_type', self.config.model_type)
mlflow.log_param('zimp_mechanism', 'None')
mlflow.log_param('random_seed', self.config.random_seed)
mlflow.log_param('dataset', self.config.dataset)
# TRAIN
ref_time = time.time()
self.train_api.clf_train_post(file=self.train_path, model_type=self.config.model_type,
seed=self.config.random_seed, asynchronous='true')
self.wait_for_train_completion() # poll api until training is completed
mlflow.log_metric('train_time_sec', time.time() - ref_time)
# EVAL TRAIN
ref_time = time.time()
self.predict_file_async(self.train_path, metric_prefix='train_')
mlflow.log_metric('train_predict_time_sec', time.time() - ref_time)
# EVAL TEST
ref_time = time.time()
self.predict_file_async(self.test_path, metric_prefix='test_')
mlflow.log_metric('test_predict_time_sec', time.time() - ref_time)
self.store_model()
logging.debug(self.train_api.clf_training_status_get())
def exists_in_mlflow(self) -> bool:
"""
:return: True if a successful experiment exists in mlflow which has the same run name
"""
run_cnt = mlflow.search_runs(
experiment_ids=[self.mlflow_experiment.experiment_id],
            filter_string=f'tags."mlflow.runName" = "{self.config.run_name}" and attributes.status = "FINISHED"').shape[0]
return run_cnt > 0
def store_model(self):
"""
retrieves trained model from clf-api and stores it in mlflow
:return:
"""
model_path = 'resources/model'
binary_file = self.download_api.clf_download_get(_preload_content=False).data
with open(model_path, 'wb') as f:
f.write(binary_file)
mlflow.log_artifact(model_path)
def predict_file_async(self, file_path, metric_prefix=""):
"""
sends complete file for prediction and polls for completion
:param file_path: path to the file which should be predicted ('text', 'target')
:return:
"""
tmp_file = 'prediction_input.csv'
df_pred = pd.read_csv(file_path)
df_pred['text'].to_csv(tmp_file, index=False)
result_id = self.predict_api.clf_file_predict_proba_post(file=tmp_file)['resultId']
self.wait_for_predict_completion(file_path, result_id, metric_prefix)
def get_predictions_for_file(self, file_path):
"""
retrieves predictions and related certainty for all texts in the supplied file
:param file_path: path to the file which should be predicted ('text', 'target')
:return: pandas df which contains loaded data plus prediction and certainty cols
"""
batch_size = 6 if self.config.model_type == 'BERT' else 128 # OOM-exception for BERT
df_pred = pd.read_csv(file_path)
df_pred['prediction'] = ''
df_pred['certainty'] = 0
df_pred['target'] = df_pred['target'].astype(str)
for idx in range(0, df_pred.shape[0], batch_size):
clf_response = self.get_api_prediction({'n': 1, 'texts': df_pred.loc[idx:idx+batch_size-1, 'text'].tolist()})
df_pred.loc[idx:idx+batch_size-1, 'prediction'] = [res['labels'][0]['label'] for res in clf_response]
df_pred.loc[idx:idx+batch_size-1, 'certainty'] = [res['labels'][0]['probability'] for res in clf_response]
return df_pred
def get_api_prediction(self, request_body):
"""
wrapper for predict_proba API call which adds a retry in case of gateway errors (may happen with slow bert model)
:param request_body:
:return:
"""
attempt_count = 0
while attempt_count < 10:
if attempt_count > 0:
logging.info("Retry API CALL")
try:
clf_response = self.predict_api.clf_m_predict_proba_post(body=request_body)
except ApiException as e:
                logging.warning("Predict Call failed: %s", e)
attempt_count += 1
else:
return clf_response
        raise ApiException(0, 'API-Call fails consistently. Please check logs')
def safe_get_status(self):
try:
train_state = self.train_api.clf_training_status_get()
return train_state['isTrained']
except ApiException as e:
            logging.warning("Status Call failed: %s", e)
return False
def wait_for_train_completion(self):
wait_time = 1
while True:
            if self.safe_get_status():
logging.info('Training completed.')
break
logging.info(f'Training not completed. Waiting for {int(wait_time)} seconds..')
time.sleep(int(wait_time))
wait_time += 0.1
def safe_get_predictions(self, result_id, prediction_path):
try:
csv_file = self.download_api.clf_file_predictions_id_get(id=result_id, _preload_content=False).data
with open(prediction_path, 'wb') as f:
f.write(csv_file)
except ApiException as e:
            logging.warning("Prediction Poll Call failed: %s", e)
if not os.path.exists(prediction_path):
return pd.DataFrame()
return | pd.read_csv(prediction_path) | pandas.read_csv |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
   we put the thing into a DOM using an existing parser, and query that?)
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
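## Illustrative sketch (an addition, not the module's own implementation): a
## minimal recursive search of the kind described in the docstring above, e.g.
## demo_search('id', ex1) -> ['hello', 'gbye'] and demo_search('url', ex1)
## returns ['url1', 'url2', 'url3'].
def demo_search(key, obj):
    found = []
    if isinstance(obj, dict):
        for k, v in obj.items():
            if k == key:
                found.append(v)
            found.extend(demo_search(key, v))
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            found.extend(demo_search(key, item))
    return found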
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}],
u('fxVersion'): u('9.0'),
u('location'): u('zh-CN'),
u('operatingSystem'): u('WINNT Windows NT 5.1'),
u('surveyAnswers'): u(''),
u('task_guid'): u('d69fbd15-2517-45b5-8a17-bb7354122a75'),
u('tpVersion'): u('1.2'),
u('updateChannel'): u('beta')},
u('survey_data'): {
u('extensions'): [{u('appDisabled'): False,
u('id'): u('testpilot?labs.mozilla.com'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('Test Pilot')},
{u('appDisabled'): True,
u('id'): u('dict?www.youdao.com'),
u('isCompatible'): False,
u('isEnabled'): False,
u('isPlatformCompatible'): True,
u('name'): u('Youdao Word Capturer')},
{u('appDisabled'): False,
u('id'): u('jqs?sun.com'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('Java Quick Starter')},
{u('appDisabled'): False,
u('id'): | u('?20a82645-c095-46ed-80e3-08825760534b?') | pandas.compat.u |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
from rasa_sdk.events import Restarted
from rasa_sdk.events import AllSlotsReset
import zomatopy
import json
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import re
import pandas as pd
import numpy as np
tier_1_2_cities = ['Agra', 'Ahmedabad', 'Ajmer', 'Aligarh', 'Amravati', 'Amritsar', 'Asansol', 'Aurangabad', 'Bareilly', 'Belgaum',\
'Bengaluru', 'Bhavnagar', 'Durg Bhilai', 'Mumbai', 'Bhopal', 'Bhubaneswar', 'Bijapur', 'Bikaner', 'Bilaspur',\
'Bokaro', 'Chandigarh', 'Chennai', 'Coimbatore', 'Cuttack', 'Dehradun', 'Delhi NCR', 'Dhanbad', 'Dindigul', \
'Durgapur', 'Erode', 'Delhi NCR', 'Firozabad', 'Delhi NCR', 'Gorakhpur', 'Gulbarga', 'Guntur', 'Delhi NCR', \
'Guwahati', 'Gwalior', 'Hamirpur', 'Dharwad', 'Hyderabad', 'Indore', 'Jabalpur', 'Jaipur', 'Jalandhar', 'Jammu', \
'Jamnagar', 'Jamshedpur', 'Jhansi', 'Jodhpur', 'Kakinada', 'Kannur', 'Kanpur', 'Karnal', 'Kochi', 'Kolhapur', 'Kolkata',\
'Kollam', 'Kozhikode', 'Kurnool', 'Lucknow', 'Ludhiana', 'Madurai', 'Malappuram', 'Mangalore', 'Mathura', 'Meerut', \
'Moradabad', 'Mumbai', 'Mysore', 'Nagpur', 'Nanded', 'Nashik', 'Nellore', 'Delhi NCR', 'Patna', 'Chennai',\
'Allahabad', 'Pune', 'Purulia', 'Raipur', 'Rajahmundry', 'Rajkot', 'Ranchi', 'Rourkela', 'Salem', 'Sangli',\
'Shimla', 'Siliguri', 'Solapur', 'Srinagar', 'Surat', 'Chennai', 'Trivandrum', 'Thrissur','Vadodara','Varanasi',\
'Ujjain','Virar','Tirunelveli','Vellore','Vijayawada','Visakhapatnam','Warangal']
##List of Tier 1 and Tier 2 cities
tier_1_2_city_names= [city.lower() for city in tier_1_2_cities]
##Validating Location
def Check_Location(loc, city_names= tier_1_2_city_names):
config={"user_key":"337f3a03601af0bbcc30b2e3506be18d"}
zomato = zomatopy.initialize_app(config)
location_detail=zomato.get_location(loc, 1)
location_json = json.loads(location_detail)
number_of_loc = len(location_json['location_suggestions'])
try:
if number_of_loc==0:
return {'location_result': 'Not Found!', 'location_name': None}
elif (location_json['location_suggestions'][0]['city_name']).lower() not in tier_1_2_city_names:
return {'location_result': "Sorry! We do not operate in this area yet.", 'location_name': None}
else:
return {'location_result': "Location Found!", 'location_name': location_json['location_suggestions'][0]['city_name']}
    except Exception:
        # no dispatcher is available inside this helper function, so return a
        # consistent "invalid" result instead of raising a NameError
        return {'location_result': 'Sorry, please enter a valid request!', 'location_name': None}
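## Illustrative note (an addition, not part of the original actions file):
## Check_Location returns a small dict, e.g. a supported city typically yields
## {'location_result': 'Location Found!', 'location_name': 'Mumbai'}, while an
## unknown or unsupported area yields a None location_name; ActionCheckLocation
## below only stores the 'location_name' part in the slot.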
class ActionCheckLocation(Action):
def name(self):
return 'action_check_location'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('location')
check= Check_Location(loc)
return[SlotSet('location',check['location_name'])]
class Actionvalidatecuisine(Action):
def name(self):
return 'action_validate_cuisine'
def run(self,dispatcher,tracker,domain):
cuisine_list = ['chinese','mexican','italian','american','south indian','north indian']
requested_cuisine = tracker.get_slot('cuisine')
if requested_cuisine is not None:
if requested_cuisine.lower() in cuisine_list:
return[SlotSet('cuisine', requested_cuisine)]
else:
dispatcher.utter_message("Sorry, the requested cuisine is invalid. Please provide a valid cuisine.")
return[SlotSet('cuisine', None)]
else:
dispatcher.utter_message("Sorry, I could not understand the requested cuisine. Please re-enter the cuisine.")
return [SlotSet('cuisine', None)]
class ActionAskBudget(Action):
def name(self):
return 'action_ask_budget'
def run(self,dispatcher,tracker,domain):
high_list= ['more than 700', 'more than rs. 700', 'more than rs 700', 'more 700', '>700', '> 700', 'high', 'elite', 'expensive', 'luxurious', '700+', '700 plus', 'greater than 700', 'higher than 700', 'more than 700', 'greater 700', 'costly']
low_list=['lesser than rs. 300', 'lesser than rs.300', 'lesser than rs300', 'lesser than rs. 300','less 300', 'lesser than rs 300', 'affordable', 'less than rs 300', 'lesser than 300', 'less than 300', '<300', '< 300', 'max 300', 'below 300', 'until 300', 'low range', 'low', 'limit 300', 'max lim 300', 'max limit 300', 'max budget 300', 'less than rs. 300']
mid_list= ['between 300 and 700','between rs.300 to 700', 'between rs300 to 700', 'rs. 300 to 700', '300-700', 'between 300-700', 'between rs. 300 to 700', 'between rs 300 to 700', 'between 300 to 700', '300 to 700', 'mid range', 'mid', 'moderate price range', 'moderate range', 'moderate']
requested_budget = tracker.get_slot('budget')
        try:
            # guard against a missing slot value before normalising the text
            requested_budget_lower = (requested_budget or '').lower().strip()
if requested_budget_lower in low_list:
return ([SlotSet('budget', 'low')])
elif requested_budget_lower in high_list:
return ([SlotSet('budget', 'high')])
elif requested_budget_lower in mid_list:
return ([SlotSet('budget', 'mid')])
else:
dispatcher.utter_message("Sorry, the budget entry is invalid. Please re-enter a valid request!")
return ([SlotSet('budget', None)])
except:
dispatcher.utter_message("Sorry, the entry is invalid. Please re-enter a valid request!")
return ([SlotSet('budget', None)])
class ActionSearchRestaurants(Action):
def name(self):
return 'action_search_restaurants'
def run(self, dispatcher, tracker, domain):
config = {"user_key": "337f3a03601af0bbcc30b2e3506be18d"}
zomato = zomatopy.initialize_app(config)
loc = tracker.get_slot('location')
cuisine = tracker.get_slot('cuisine')
location_detail = zomato.get_location(loc, 1)
budget_detail = tracker.get_slot('budget')
if budget_detail == 'low':
min_val = 0
max_val = 300
elif budget_detail == 'mid':
min_val = 301
max_val = 700
else:
min_val = 701
max_val = 10000000
d1 = json.loads(location_detail)
lat = d1["location_suggestions"][0]["latitude"]
lon = d1["location_suggestions"][0]["longitude"]
cuisines_dict = {'american': 1, 'mexican': 73, 'chinese': 25, 'italian': 55, 'north indian': 50, 'south indian': 85}
results = zomato.restaurant_search("", lat, lon, str(cuisines_dict.get(cuisine)), 20)
d = json.loads(results)
results_shown = int(d['results_shown'])
name = []
location = []
avg_cost = []
agg_rating = []
response = ""
for i in range(0, results_shown):
name.append(d['restaurants'][i]['restaurant']['name'])
location.append(d['restaurants'][i]['restaurant']['location']['address'])
avg_cost.append(int(d['restaurants'][i]['restaurant']['average_cost_for_two']))
agg_rating.append(float(d['restaurants'][i]['restaurant']['user_rating']['aggregate_rating']))
df_display = pd.DataFrame({'Name': name, 'Location': location, 'average_cost_for_two': avg_cost, 'Ratings': agg_rating})
df_display = df_display[(df_display['average_cost_for_two'] >= min_val) & (df_display['average_cost_for_two'] <= max_val)].sort_values('Ratings', ascending=False)
df_display = df_display.head(5)
if len(df_display)!=0:
for index , rec in df_display.iterrows():
response = response + rec['Name'] + " in " + rec['Location'] + " " + "has been rated " + \
str(rec['Ratings']) + "\n"
dispatcher.utter_message("" + response)
return [SlotSet('location', loc)]
else:
dispatcher.utter_message("No records found!")
class ActionValidateEmail(Action):
def name(self):
return 'action_check_email'
def run(self, dispatcher, tracker, domain):
user_email = tracker.get_slot('email')
regex = '^[a-zA-Z0-9_.%+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
if user_email is not None:
if (re.search(regex, user_email)):
return [SlotSet('email', user_email)]
else:
dispatcher.utter_message("Sorry, the provided email is invalid, please recheck the email id provided ")
return [SlotSet('email', None)]
else:
            dispatcher.utter_message("Sorry, I couldn't understand the input provided. Please provide a valid email id again.")
return [SlotSet('email', None)]
class ActionSendEmail(Action):
def name(self):
return 'action_send_email'
def run(self,dispatcher, tracker, domain):
try:
config = {"user_key": "337f3a03601af0bbcc30b2e3506be18d"}
zomato = zomatopy.initialize_app(config)
loc = tracker.get_slot('location')
cuisine = tracker.get_slot('cuisine')
location_detail = zomato.get_location(loc, 1)
budget_detail = tracker.get_slot('budget')
if budget_detail == 'low':
min_val = 0
max_val = 300
elif budget_detail == 'mid':
min_val = 301
max_val = 700
else:
min_val = 701
max_val = 1000000
d1 = json.loads(location_detail)
lat = d1["location_suggestions"][0]["latitude"]
lon = d1["location_suggestions"][0]["longitude"]
cuisines_dict = {'american': 1, 'mexican': 73, 'chinese': 25, 'italian': 55, 'north indian': 50,'south indian': 85}
results = zomato.restaurant_search("", lat, lon, str(cuisines_dict.get(cuisine)), 20)
d = json.loads(results)
results_shown = int(d['results_shown'])
name = []
location = []
avg_cost = []
agg_rating = []
for i in range(0, results_shown):
name.append(d['restaurants'][i]['restaurant']['name'])
location.append(d['restaurants'][i]['restaurant']['location']['address'])
avg_cost.append(int(d['restaurants'][i]['restaurant']['average_cost_for_two']))
agg_rating.append(float(d['restaurants'][i]['restaurant']['user_rating']['aggregate_rating']))
df = | pd.DataFrame({'Restaurant Name': name, 'Restaurant locality address': location, 'Average budget for two people': avg_cost, 'Zomato user rating': agg_rating}) | pandas.DataFrame |
import pandas as pd
import pytest
from numpy import inf, nan, testing
from toucan_data_sdk.utils.postprocess import waterfall
@pytest.fixture
def sample_data():
return [
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't1',
'played': 12,
},
{
'ord': 10,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'clap clap',
'date': 't1',
'played': 1,
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't1',
'played': 1,
},
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't2',
'played': 10,
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't2',
'played': 100,
},
{
'ord': 1,
'category_name': 'Tom',
'category_id': 'tom',
'product_id': 'bom',
'date': 't2',
'played': 1,
},
]
@pytest.fixture
def sample_filter_data():
return [
# filterA = 'mickey', filterB = 'dodo'
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't1',
'played': 15,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'ord': 10,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'clap clap',
'date': 't1',
'played': 5,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't1',
'played': 10,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't2',
'played': 17,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't2',
'played': 20,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Tom',
'category_id': 'tom',
'product_id': 'bom',
'date': 't2',
'played': 50,
'filterA': 'mickey',
'filterB': 'dodo',
},
# filterA = 'donald', filterB = 'dodo'
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't1',
'played': 12,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'ord': 10,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'clap clap',
'date': 't1',
'played': 1,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't1',
'played': 1,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't2',
'played': 10,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't2',
'played': 100,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'ord': 1,
'category_name': 'Tom',
'category_id': 'tom',
'product_id': 'bom',
'date': 't2',
'played': 1,
'filterA': 'donald',
'filterB': 'dodo',
},
# filterA = 'mickey', filterB = 'dada'
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't1',
'played': 15,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'ord': 10,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'clap clap',
'date': 't1',
'played': 5,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't1',
'played': 10,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't2',
'played': 17,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't2',
'played': 20,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Tom',
'category_id': 'tom',
'product_id': 'bom',
'date': 't2',
'played': 50,
'filterA': 'mickey',
'filterB': 'dada',
},
# filterA = 'donald', filterB = 'dada'
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't1',
'played': 12,
'filterA': 'donald',
'filterB': 'dada',
},
{
'ord': 10,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'clap clap',
'date': 't1',
'played': 1,
'filterA': 'donald',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't1',
'played': 1,
'filterA': 'donald',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Clap',
'category_id': 'clap',
'product_id': 'super clap',
'date': 't2',
'played': 10,
'filterA': 'donald',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Snare',
'category_id': 'snare',
'product_id': 'tac',
'date': 't2',
'played': 100,
'filterA': 'donald',
'filterB': 'dada',
},
{
'ord': 1,
'category_name': 'Tom',
'category_id': 'tom',
'product_id': 'bom',
'date': 't2',
'played': 1,
'filterA': 'donald',
'filterB': 'dada',
},
]
def test_waterfall(sample_data):
""" It should return value for waterfall """
kwargs = {
'upperGroup': {'id': 'category_id', 'label': 'category_name'},
'insideGroup': {'id': 'product_id', 'groupsOrder': 'ord'},
'date': 'date',
'value': 'played',
'start': {'label': 'Trimestre 1', 'id': 't1'},
'end': {'label': 'Trimester 2', 'id': 't2'},
}
expected = [
{
'variation': nan,
'label': 'Trimestre 1',
'value': 14.0,
'groups': 'Trimestre 1',
'type': nan,
'order': nan,
},
{
'variation': -0.23076923076923078,
'label': 'Clap',
'value': -3.0,
'groups': 'clap',
'type': 'parent',
'order': nan,
},
{
'variation': -0.16666666666666666,
'label': 'super clap',
'value': -2.0,
'groups': 'clap',
'type': 'child',
'order': 1.0,
},
{
'variation': -1.000000,
'label': 'clap clap',
'value': -1.0,
'groups': 'clap',
'type': 'child',
'order': 10.0,
},
{
'variation': 99.0,
'label': 'Snare',
'value': 99.0,
'groups': 'snare',
'type': 'parent',
'order': nan,
},
{
'variation': 99.0,
'label': 'tac',
'value': 99.0,
'groups': 'snare',
'type': 'child',
'order': 1.0,
},
{
'variation': inf,
'label': 'Tom',
'value': 1.0,
'groups': 'tom',
'type': 'parent',
'order': nan,
},
{
'variation': inf,
'label': 'bom',
'value': 1.0,
'groups': 'tom',
'type': 'child',
'order': 1.0,
},
{
'variation': nan,
'label': 'Trimester 2',
'value': 111.0,
'groups': 'Trimester 2',
'type': nan,
'order': nan,
},
]
df = pd.DataFrame(sample_data)
df = waterfall(df, **kwargs)
wa = [{k: v for k, v in zip(df.columns, row)} for row in df.values]
assert wa[0].keys() == expected[0].keys()
for i in range(len(expected)):
testing.assert_equal(wa[i], expected[i])
def test_waterfall_filter_simple(sample_filter_data):
""" It should return value for waterfall """
kwargs = {
'upperGroup': {'id': 'product_id', 'groupsOrder': 'ord'},
'filters': 'filterA',
'date': 'date',
'value': 'played',
'start': {'label': 'Trimestre 1', 'id': 't1'},
'end': {'label': 'Trimester 2', 'id': 't2'},
}
expected = [
# mickey
{
'groups': 'Trimestre 1',
'label': 'Trimestre 1',
'type': nan,
'value': 30.0,
'variation': nan,
'order': nan,
'filterA': 'mickey',
},
{
'groups': 'bom',
'label': 'bom',
'type': 'parent',
'value': 50.0,
'variation': inf,
'order': 1.0,
'filterA': 'mickey',
},
{
'groups': 'super clap',
'label': 'super clap',
'type': 'parent',
'value': 2.0,
'variation': 0.13333333333333333,
'order': 1.0,
'filterA': 'mickey',
},
{
'groups': 'tac',
'label': 'tac',
'type': 'parent',
'value': 10.0,
'variation': 1.0,
'order': 1.0,
'filterA': 'mickey',
},
{
'groups': 'clap clap',
'label': 'clap clap',
'type': 'parent',
'value': -5.0,
'variation': -1.0,
'order': 10.0,
'filterA': 'mickey',
},
{
'groups': 'Trimester 2',
'label': 'Trimester 2',
'type': nan,
'value': 87.0,
'variation': nan,
'order': nan,
'filterA': 'mickey',
},
# donald
{
'groups': 'Trimestre 1',
'label': 'Trimestre 1',
'type': nan,
'value': 14.0,
'variation': nan,
'order': nan,
'filterA': 'donald',
},
{
'groups': 'bom',
'label': 'bom',
'type': 'parent',
'value': 1.0,
'variation': inf,
'order': 1.0,
'filterA': 'donald',
},
{
'groups': 'super clap',
'label': 'super clap',
'type': 'parent',
'value': -2.0,
'variation': -0.16666666666666666,
'order': 1.0,
'filterA': 'donald',
},
{
'groups': 'tac',
'label': 'tac',
'type': 'parent',
'value': 99.0,
'variation': 99.0,
'order': 1.0,
'filterA': 'donald',
},
{
'groups': 'clap clap',
'label': 'clap clap',
'type': 'parent',
'value': -1.0,
'variation': -1.0,
'order': 10.0,
'filterA': 'donald',
},
{
'groups': 'Trimester 2',
'label': 'Trimester 2',
'type': nan,
'value': 111.0,
'variation': nan,
'order': nan,
'filterA': 'donald',
},
]
df = pd.DataFrame(sample_filter_data).query("filterB == 'dodo'")
df = waterfall(df, **kwargs).copy()
wa = [{k: v for k, v in zip(df.columns, row)} for row in df.values]
assert set(wa[0].keys()) == set(expected[0].keys())
for i in range(len(expected)):
testing.assert_equal(wa[i], expected[i])
def test_waterfall_filter_two_col(sample_filter_data):
""" It should return value for waterfall """
kwargs = {
'upperGroup': {'id': 'product_id', 'groupsOrder': 'ord'},
'filters': ['filterA', 'filterB'],
'date': 'date',
'value': 'played',
'start': {'label': 'Trimestre 1', 'id': 't1'},
'end': {'label': 'Trimester 2', 'id': 't2'},
}
expected = [
# mickey - dodo
{
'groups': 'Trimestre 1',
'label': 'Trimestre 1',
'type': nan,
'value': 30.0,
'variation': nan,
'order': nan,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'groups': 'bom',
'label': 'bom',
'type': 'parent',
'value': 50.0,
'variation': inf,
'order': 1.0,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'groups': 'super clap',
'label': 'super clap',
'type': 'parent',
'value': 2.0,
'variation': 0.13333333333333333,
'order': 1.0,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'groups': 'tac',
'label': 'tac',
'type': 'parent',
'value': 10.0,
'variation': 1.0,
'order': 1.0,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'groups': 'clap clap',
'label': 'clap clap',
'type': 'parent',
'value': -5.0,
'variation': -1.0,
'order': 10.0,
'filterA': 'mickey',
'filterB': 'dodo',
},
{
'groups': 'Trimester 2',
'label': 'Trimester 2',
'type': nan,
'value': 87.0,
'variation': nan,
'order': nan,
'filterA': 'mickey',
'filterB': 'dodo',
},
# donald - dodo
{
'groups': 'Trimestre 1',
'label': 'Trimestre 1',
'type': nan,
'value': 14.0,
'variation': nan,
'order': nan,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'groups': 'bom',
'label': 'bom',
'type': 'parent',
'value': 1.0,
'variation': inf,
'order': 1.0,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'groups': 'super clap',
'label': 'super clap',
'type': 'parent',
'value': -2.0,
'variation': -0.16666666666666666,
'order': 1.0,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'groups': 'tac',
'label': 'tac',
'type': 'parent',
'value': 99.0,
'variation': 99.0,
'order': 1.0,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'groups': 'clap clap',
'label': 'clap clap',
'type': 'parent',
'value': -1.0,
'variation': -1.0,
'order': 10.0,
'filterA': 'donald',
'filterB': 'dodo',
},
{
'groups': 'Trimester 2',
'label': 'Trimester 2',
'type': nan,
'value': 111.0,
'variation': nan,
'order': nan,
'filterA': 'donald',
'filterB': 'dodo',
},
# mickey - dada
{
'groups': 'Trimestre 1',
'label': 'Trimestre 1',
'type': nan,
'value': 30.0,
'variation': nan,
'order': nan,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'groups': 'bom',
'label': 'bom',
'type': 'parent',
'value': 50.0,
'variation': inf,
'order': 1.0,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'groups': 'super clap',
'label': 'super clap',
'type': 'parent',
'value': 2.0,
'variation': 0.13333333333333333,
'order': 1.0,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'groups': 'tac',
'label': 'tac',
'type': 'parent',
'value': 10.0,
'variation': 1.0,
'order': 1.0,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'groups': 'clap clap',
'label': 'clap clap',
'type': 'parent',
'value': -5.0,
'variation': -1.0,
'order': 10.0,
'filterA': 'mickey',
'filterB': 'dada',
},
{
'groups': 'Trimester 2',
'label': 'Trimester 2',
'type': nan,
'value': 87.0,
'variation': nan,
'order': nan,
'filterA': 'mickey',
'filterB': 'dada',
},
# donald - dada
{
'groups': 'Trimestre 1',
'label': 'Trimestre 1',
'type': nan,
'value': 14.0,
'variation': nan,
'order': nan,
'filterA': 'donald',
'filterB': 'dada',
},
{
'groups': 'bom',
'label': 'bom',
'type': 'parent',
'value': 1.0,
'variation': inf,
'order': 1.0,
'filterA': 'donald',
'filterB': 'dada',
},
{
'groups': 'super clap',
'label': 'super clap',
'type': 'parent',
'value': -2.0,
'variation': -0.16666666666666666,
'order': 1.0,
'filterA': 'donald',
'filterB': 'dada',
},
{
'groups': 'tac',
'label': 'tac',
'type': 'parent',
'value': 99.0,
'variation': 99.0,
'order': 1.0,
'filterA': 'donald',
'filterB': 'dada',
},
{
'groups': 'clap clap',
'label': 'clap clap',
'type': 'parent',
'value': -1.0,
'variation': -1.0,
'order': 10.0,
'filterA': 'donald',
'filterB': 'dada',
},
{
'groups': 'Trimester 2',
'label': 'Trimester 2',
'type': nan,
'value': 111.0,
'variation': nan,
'order': nan,
'filterA': 'donald',
'filterB': 'dada',
},
]
df = pd.DataFrame(sample_filter_data)
df = waterfall(df, **kwargs).copy()
wa = [{k: v for k, v in zip(df.columns, row)} for row in df.values]
assert set(wa[0].keys()) == set(expected[0].keys())
for i in range(len(expected)):
testing.assert_equal(wa[i], expected[i])
def test_waterfall_upperGroup_groupsOrder(sample_data):
for line in sample_data:
line['category_order'] = len(line['category_name'])
del line['ord']
kwargs = {
'upperGroup': {
'id': 'category_id',
'label': 'category_name',
'groupsOrder': 'category_order',
},
'insideGroup': {'id': 'product_id'},
'date': 'date',
'value': 'played',
'start': {'label': 'Trimestre 1', 'id': 't1'},
'end': {'label': 'Trimester 2', 'id': 't2'},
}
expected = [
{
'variation': nan,
'label': 'Trimestre 1',
'value': 14.0,
'groups': 'Trimestre 1',
'type': nan,
'order': nan,
},
{
'variation': inf,
'label': 'Tom',
'value': 1.0,
'groups': 'tom',
'type': 'parent',
'order': 3.0,
},
{
'variation': inf,
'label': 'bom',
'value': 1.0,
'groups': 'tom',
'type': 'child',
'order': nan,
},
{
'variation': -0.23076923076923078,
'label': 'Clap',
'value': -3.0,
'groups': 'clap',
'type': 'parent',
'order': 4.0,
},
{
'variation': -1.0,
'label': 'clap clap',
'value': -1.0,
'groups': 'clap',
'type': 'child',
'order': nan,
},
{
'variation': -0.16666666666666666,
'label': 'super clap',
'value': -2.0,
'groups': 'clap',
'type': 'child',
'order': nan,
},
{
'variation': 99.0,
'label': 'Snare',
'value': 99.0,
'groups': 'snare',
'type': 'parent',
'order': 5.0,
},
{
'variation': 99.0,
'label': 'tac',
'value': 99.0,
'groups': 'snare',
'type': 'child',
'order': nan,
},
{
'variation': nan,
'label': 'Trimester 2',
'value': 111.0,
'groups': 'Trimester 2',
'type': nan,
'order': nan,
},
]
    df = pd.DataFrame(sample_data)
    df = waterfall(df, **kwargs)
    wa = [{k: v for k, v in zip(df.columns, row)} for row in df.values]
    assert wa[0].keys() == expected[0].keys()
    for i in range(len(expected)):
        testing.assert_equal(wa[i], expected[i])
from pandas.core.common import notnull, isnull
import pandas.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert isnull(np.inf)
assert isnull(-np.inf)
def test_any_none():
assert(common._any_none(1, 2, 3, None))
assert(not common._any_none(1, 2, 3, 4))
def test_all_not_none():
assert(common._all_not_none(1, 2, 3, 4))
assert(not common._all_not_none(1, 2, 3, None))
assert(not common._all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(len(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = common.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(common.iterpairs(data))
assert(result == expected)
def test_indent():
s = 'a b c\nd e f'
    result = common.indent(s, spaces=6)
    assert(result == '      a b c\n      d e f')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 09:34:08 2017
@author: <NAME>
Answer query script: This script contains functions to query and manipulate DLR survey answer sets. It references datasets that must be stored in a /data/tables subdirectory in the parent directory.
"""
import numpy as np
import pandas as pd
import os
from glob import glob
import json
import feather
from support import feature_dir, fdata_dir, InputError, writeLog, validYears, table_dir
def loadTable(name, query=None, columns=None):
"""
    This function loads a single named feather table from the table directory, optionally
    restricted to a subset of its columns.
"""
dir_path = os.path.join(table_dir, 'feather')
file = os.path.join(dir_path, name +'.feather')
d = feather.read_dataframe(file)
if columns is None:
table = d
else:
table = d[columns]
try:
return table
except UnboundLocalError:
return('Could not find table with name '+name)
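# Illustrative usage (assumes the feather exports referenced in this module exist under
# table_dir/feather):
# links = loadTable('links', columns=['AnswerID', 'ProfileID', 'GroupID'])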
def loadID():
"""
    This function merges the groups, links and profiles tables and returns all valid AnswerIDs and
    ProfileIDs together with their survey Year, GroupID and location metadata.
"""
groups = loadTable('groups')
links = loadTable('links')
profiles = loadTable('profiles')
# a_id = links[(links.GroupID != 0) & (links['AnswerID'] != 0)].drop(columns=['ConsumerID','lock','ProfileID'])
p_id = links[(links.GroupID != 0) & (links['ProfileID'] != 0)].drop(labels=['ConsumerID','lock','AnswerID'], axis=1)
profile_meta = profiles.merge(p_id, how='left', left_on='ProfileId', right_on='ProfileID').drop(labels=['ProfileId','lock'], axis=1)
ap = links[links.GroupID==0].drop(labels=['ConsumerID','lock','GroupID'], axis=1)
x = profile_meta.merge(ap, how='outer', on = 'ProfileID')
join = x.merge(groups, on='GroupID', how='left')
#Wrangling data into right format
all_ids = join[join['Survey'] != 'Namibia'] # remove Namibia
all_ids = all_ids.dropna(subset=['GroupID','Year'])
all_ids.Year = all_ids.Year.astype(int)
all_ids.GroupID = all_ids.GroupID.astype(int)
all_ids.AnswerID.fillna(0, inplace=True)
all_ids.AnswerID = all_ids.AnswerID.astype(int)
all_ids.ProfileID = all_ids.ProfileID.astype(int)
return all_ids
def idsDuplicates():
ids = loadID()
i = ids[(ids.duplicated('AnswerID')==True)&(ids['AnswerID']!=0)]
ip = i.pivot_table(index='Year',columns='AnswerID',values='ProfileID',aggfunc='count')
return ip.T.describe()
def matchAIDToPID(year, pp):
#TODO still needs checking --- think about integrating with socios.loadID -> all PIDs and the 0 where there is no corresponding AID
    ids = loadID()
    a_id = ids.loc[(ids.AnswerID != 0) & (ids.Year == year), 'AnswerID']
# p_id = socios.loadID(year, id_name = 'ProfileID')['id']
#get dataframe of linkages between AnswerIDs and ProfileIDs
links = loadTable('links')
# year_links = links[links.ProfileID.isin(p_id)]
year_links = links[links.AnswerID.isin(a_id)]
year_links = year_links.loc[year_links.ProfileID != 0, ['AnswerID','ProfileID']]
#get profile metadata (recorder ID, recording channel, recorder type, units of measurement)
profiles = loadTable('profiles')
#add AnswerID information to profiles metadata
profile_meta = year_links.merge(profiles, left_on='ProfileID', right_on='ProfileId').drop('ProfileId', axis=1)
VI_profile_meta = profile_meta.loc[(profile_meta['Unit of measurement'] == 2), :] #select current profiles only
#THIS IS NB!!
output = pp.merge(VI_profile_meta.loc[:,['AnswerID','ProfileID']], left_on='ProfileID_i', right_on='ProfileID').drop(['ProfileID','Valid_i','Valid_v'], axis=1)
output = output[output.columns.sort_values()]
output.fillna({'valid_calculated':0}, inplace=True)
return output
def loadQuestions(dtype = None):
"""
This function gets all questions.
"""
qu = loadTable('questions').drop(labels='lock', axis=1)
qu.Datatype = qu.Datatype.astype('category')
qu.Datatype.cat.categories = ['blob','char','num']
qu['ColumnAlias'] = [x.strip() for x in qu['ColumnAlias']]
if dtype is None:
pass
else:
qu = qu[qu.Datatype == dtype]
return qu
def loadAnswers():
"""
    This function returns a dict of dataframes keyed by data type ('blob', 'char', 'num'), each
    containing the answer IDs, their question responses for that data type and the corresponding
    QuestionaireID.
"""
answer_meta = loadTable('answers', columns=['AnswerID', 'QuestionaireID'])
blob = loadTable('answers_blob_anonymised').drop(labels='lock', axis=1)
blob = blob.merge(answer_meta, how='left', on='AnswerID')
blob.fillna(np.nan, inplace = True)
char = loadTable('answers_char_anonymised').drop(labels='lock', axis=1)
char = char.merge(answer_meta, how='left', on='AnswerID')
char.fillna(np.nan, inplace = True)
num = loadTable('answers_number_anonymised').drop(labels='lock', axis=1)
num = num.merge(answer_meta, how='left', on='AnswerID')
num.fillna(np.nan, inplace = True)
return {'blob':blob, 'char':char, 'num':num}
def searchQuestions(search = None):
"""
    Searches the question text for a single search term (a string). Spaces, brackets and slashes are
    stripped from the question text before the case-insensitive match is made. Returns the matching
    questions with their data type, QuestionaireID and column number.
"""
questions = loadTable('questions').drop(labels='lock', axis=1)
questions.Datatype = questions.Datatype.astype('category')
questions.Datatype.cat.categories = ['blob','char','num']
if search is None:
searchterm = ''
else:
searchterm = search.replace(' ', '+')
trantab = str.maketrans({'(':'', ')':'', ' ':'', '/':''})
result = questions.loc[questions.Question.str.translate(trantab).str.contains(searchterm, case=False), ['Question', 'Datatype','QuestionaireID', 'ColumnNo']]
return result
def searchAnswers(search):
"""
    This function returns the answer IDs and question responses that match a given search term.
"""
answers = loadAnswers()
questions = searchQuestions(search) #get column numbers for query
result = pd.DataFrame(columns=['AnswerID','QuestionaireID'])
for dt in questions.Datatype.unique():
ans = answers[dt]
for i in questions.QuestionaireID.unique():
select = questions.loc[(questions.Datatype == dt)&(questions.QuestionaireID==i)]
fetchcolumns=['AnswerID'] + ['QuestionaireID'] + list(select.ColumnNo.astype(str))
newcolumns = ['AnswerID'] + ['QuestionaireID'] + list(select.Question.astype(str).str.lower())
df = ans.loc[ans['QuestionaireID']==i,fetchcolumns]
df.columns = newcolumns
result = result.merge(df, how='outer')
return result
def extractSocios(searchlist, year=None, col_names=None, geo=None):
"""
    This function extracts a set of selected survey features for a given year and returns them in a
    single dataframe keyed on AnswerID/ProfileID.
    Questionaire options: 6 - pre 1999, 3 - 2000 onwards.
    'geo' adds location data and can be one of Municipality, District, Province or None.
"""
if isinstance(searchlist, list):
pass
else:
searchlist = [searchlist]
if col_names is None:
search = dict(zip(searchlist, searchlist))
else:
search = dict(zip(searchlist, col_names))
#filter AnswerIDs by year
ids = loadID()
if year is None:
sub_ids = ids[ids.AnswerID!=0]
else:
sub_ids = ids[(ids.AnswerID!=0)&(ids.Year==year)]
sub_ids = sub_ids.drop_duplicates(subset='AnswerID')
#generate feature frame
result = pd.DataFrame(columns=['AnswerID','QuestionaireID'])
for s in search.keys():
d = searchAnswers(s)
ans = d[(d.AnswerID.isin(sub_ids.AnswerID)) & (d.QuestionaireID < 10)] # remove non-domestic results
ans = ans.dropna(axis=1, how='all')
#set feature frame column names
if len(ans.columns[2:])==1:
ans.columns = ['AnswerID','QuestionaireID'] + [search.get(s)]
try:
result = result.merge(ans, how='outer')
except Exception:
pass
if geo is None:
result = result.merge(sub_ids[['AnswerID', 'ProfileID']], how='left')
else:
result = result.merge(sub_ids[['AnswerID', 'ProfileID', geo]], how='left')
return result
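# Illustrative call (the search term and column name are placeholders; in practice they come from
# the feature specification files consumed by generateSociosSetSingle below):
# income = extractSocios('earn per month', year=2012, col_names=['monthly_income'], geo='Province')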
def generateSociosSetSingle(year, spec_file, set_id='ProfileID'):
"""
    This function generates a socio-demographic feature set for the specified year, formatted so that
    it can be supplied as evidence to the python library libpgm. It requires a json formatted text
    file with feature specifications as input and returns the processed features as a dataframe
    indexed on set_id.
"""
#Get feature specficiations
files = glob(os.path.join(feature_dir, 'specification', spec_file + '*.txt'))
for file_path in files:
try:
with open(file_path, 'r') as f:
featurespec = json.load(f)
year_range = featurespec['year_range']
except:
raise InputError(year, 'Problem reading the spec file.')
if year >= int(year_range[0]) and year <= int(year_range[1]):
validYears(year) #check if year input is valid
break
else:
continue
searchlist = featurespec['searchlist']
features = featurespec['features']
transform = featurespec['transform']
bins = featurespec['bins']
labels = featurespec['labels']
cut = featurespec['cut']
replace = featurespec['replace']
if len(featurespec['geo'])==0:
geo = None
else:
geo = featurespec['geo']
#Get data and questions from socio-demographic survey responses
data = extractSocios(searchlist, year, col_names=searchlist, geo=geo)
missing_cols = list(set(searchlist) - set(data.columns))
data = data.append(pd.DataFrame(columns=missing_cols), sort=True) #add columns dropped during feature extraction
data.fillna(0, inplace=True) #fill na with 0 to allow for further processing
data['AnswerID'] = data.AnswerID.astype(int)
data['ProfileID'] = data.ProfileID.astype(int)
    if len(data) == 0:
raise InputError(year, 'No survey data collected for this year')
else:
#Transform and select BN nodes from dataframe
for k, v in transform.items():
data[k] = data.apply(lambda x: eval(v), axis=1)
try:
data = data[[set_id, geo] + features]
except:
data = data[[set_id] + features]
#adjust monthly income for inflation baselined to December 2016.
#Important that this happens here, after columns have been renamed and before income data is binned
if 'monthly_income' in features:
cpi_percentage=(0.265,0.288,0.309,0.336,0.359,0.377,0.398,0.42,0.459,0.485,0.492,0.509,
0.532,0.57,0.636,0.678,0.707,0.742, 0.784, 0.829,0.88,0.92,0.979,1.03)
            cpi = dict(zip(list(range(1994, 2018)), cpi_percentage))  # 24 CPI factors: one per year from 1994 to 2017
data['monthly_income'] = data['monthly_income']/cpi[year]
#Cut columns into datatypes that match factors of BN node variables
for k, v in bins.items():
bin_vals = [int(b) for b in v]
try:
data[k] = pd.cut(data[k], bins = bin_vals, labels = labels[k],
right=eval(cut[k]['right']), include_lowest=eval(cut[k]['include_lowest']))
data[k].cat.reorder_categories(labels[k], inplace=True)
except KeyError:
data[k] = pd.cut(data[k], bins = bin_vals, labels = labels[k])
for y, z in replace.items():
data[y].replace([int(a) for a in z.keys()], z.values(),inplace=True)
data[y].where(data[y]!=0, inplace=True)
data.set_index(set_id, inplace=True) #set ID column as index
return data
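# Illustrative call (the spec file name is a placeholder; the file must exist under
# feature_dir/specification/ and define year_range, searchlist, features, transform, bins, labels,
# cut, replace and geo):
# bn_features = generateSociosSetSingle(2012, 'my_spec', set_id='ProfileID')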
def generateSociosSetMulti(spec_files, year_start=1994, year_end=2014):
if isinstance(spec_files, list):
pass
else:
spec_files = [spec_files]
ff = pd.DataFrame()
for spec in spec_files:
        gg = pd.DataFrame()
import pandas as pd
from bld.project_paths import project_paths_join as ppj
# Read the dataset.
adults2005 = pd.read_stata(ppj("IN_DATA", "vp.dta"))
adults2009 = pd.read_stata(ppj("IN_DATA", "zp.dta"))
adults2013 = pd.read_stata(ppj("IN_DATA", "bdp.dta"))
# Extract Column of Big 5 Variables we need for the research.
big_adults_2005 = adults2005.loc[:, 'vp12501':'vp12515']
big_adults_2009 = adults2009.loc[:, 'zp12001':'zp12015']
big_adults_2013 = adults2013.loc[:, 'bdp15101':'bdp15115']
# Rename to meaningful names. (Big Five)
for big_five in [big_adults_2005, big_adults_2009, big_adults_2013]:
big_five.columns = ['work_carefully', 'communicative', 'abrasive',
'new_idea', 'often_worry',
'forgiving_nature', 'lazy', 'outgoing',
'esthetics', 'often_nervous', 'work_efficiently',
'reserved', 'considerate', 'lively_imagination',
'be_relaxed']
# Extract Column of basic variables we need for the research.
ids2005 = adults2005.loc[:, ['hhnr', 'persnr', 'welle', 'vp14701',
'vp14702', 'vp135']]
ids2009 = adults2009.loc[:, ['hhnr', 'persnr', 'welle', 'zp12901',
'zp12902', 'zp137']]
ids2013 = adults2013.loc[:, ['hhnr', 'persnr', 'welle', 'bdp13401',
'bdp13403', 'bdp143']]
# Rename identifiers to match the other data sets.
ids2005.columns = ['cid', 'pid_parents', 'syear', 'sex_parent_2005',
'birth_year_parent_2005', 'german_nationality_2005']
ids2009.columns = ['cid', 'pid_parents', 'syear', 'sex_parent_2009',
'birth_year_parent_2009', 'german_nationality_2009']
ids2013.columns = ['cid', 'pid_parents', 'syear', 'sex_parent_2013',
'birth_year_parent_2013', 'german_nationality_2013']
# Merge the ids with big five or LoC variable.
data_adults_2005 = pd.concat([ids2005, big_adults_2005],
axis=1)
data_adults_2009 = pd.concat([ids2009, big_adults_2009], axis=1)
data_adults_2013 = pd.concat([ids2013, big_adults_2013], axis=1)
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 2 21:57:56 2017
@author: The Computer
"""
def ModelIt(SubjectIDandOneHotEncoded):
import numpy as np
from sklearn.externals import joblib
SubjectIDandOneHotEncoded.fillna(value=0,inplace=True)
#import the model
clf=joblib.load('/home/InsightfullyYours/webapp/assets/files/modeldump2.pk1')
#extract the predictions
SubjectIDandOneHotEncoded['Prediction2']=np.round(np.exp(clf.predict(SubjectIDandOneHotEncoded.iloc[:,1:])),0)
Predictions=SubjectIDandOneHotEncoded[['subject_id','Prediction2']]
result = Predictions
return result
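# Hypothetical usage sketch: the input frame must hold 'subject_id' in its first column followed by
# the one-hot encoded features the pickled model was trained on (the feature names below are
# placeholders, not the real training columns):
# preds = ModelIt(pd.DataFrame({'subject_id': [101], 'feature_a': [1], 'feature_b': [0]}))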
def SchedulingMap(GroupedDF,cushion):
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Range1d
from bokeh.models.tools import HoverTool
import datetime
from datetime import datetime as dt
import numpy as np
import pandas as pd
DF=GroupedDF
#convert all the times appropriately and calculate relevant dependent times
DF['Start_dt']=DF['admitdate'].apply(lambda x: dt.strptime(x, '%Y-%m-%d'))
    DF['Duration'] = pd.to_timedelta(DF['Duration'])
import gc
from logging import warning
from time import sleep, perf_counter
from typing import Optional, Union, Dict, List, Tuple, Callable
import numpy as np
import pandas as pd
from numpy import ndarray
from rdkit.Chem import AddHs, CanonSmiles, MolToSmiles, MolFromSmiles, MolFromInchi, Kekulize, SanitizeMol
from rdkit.Chem.rdchem import Mol, RWMol
from sklearn.base import BaseEstimator
from .AbstractModel import AbstractModel
from .BaseCreator import getRadicalsByBondIdx, getBondIndex, getBondIndexByAtom
from .Creator import DataGenerator
from .Dataset import FeatureData, allocateFeatureLocation
from .DatasetAPI import DatasetTransmitterWrapper
from .Preprocessing import inputFullCheck, inputCheckRange, inputCheckIterableInRange, MeasureExecutionTime, FixPath, \
ReadFile, ExportFile, SortWithIdx, ArraySorting, EvaluateInputPosition, GetIndexOnArrangedData, ArrayEqual
from .coreConfig import getPrebuiltInfoLabels
BASE_TEMPLATE = Optional[ndarray]
INPUT_FOR_DATABASE = Union[BASE_TEMPLATE, pd.DataFrame]
ITERABLE_TEMPLATE = Union[INPUT_FOR_DATABASE, List, Tuple]
SINGLE_INPUT_TEMPLATE = Union[BASE_TEMPLATE, str]
MULTI_INPUT_TEMPLATE = Union[BASE_TEMPLATE, List[str]]
MULTI_COLUMNS = Union[List, Tuple]
def _FixData_(database: INPUT_FOR_DATABASE) -> ndarray:
if not isinstance(database, ndarray):
database: ndarray = np.asarray(database)
if database.ndim != 2:
database: ndarray = np.atleast_2d(database)
return database
def _tuneFinalDataset_(InfoData: ndarray, FalseLine: List[int], TrueReference: Optional[ndarray] = None) \
-> Tuple[ndarray, Optional[ndarray]]:
inputFullCheck(value=FalseLine, name='FalseLine', dtype='List')
if len(FalseLine) != 0:
FalseLine.sort()
warning(f" These are the error lines needed to be removed: {FalseLine}")
InfoData: ndarray = np.delete(InfoData, obj=FalseLine, axis=0)
if TrueReference is not None:
TrueReference: ndarray = np.delete(TrueReference, obj=FalseLine, axis=0)
return InfoData, TrueReference
class PredictModel(DatasetTransmitterWrapper):
"""
A Python Implementation of AIP-BDET model (Bit2Edge framework): This class used to make prediction as
well as some modifier for data visualization
- AIP-BDET is a low-cost and multi-purpose deep-learning tool that can predict Bond Dissociation Energy with
superior accuracy with powerful classification strength & interpretability on ordinary atoms (C, H, O, N).
- AIP-BDET is constructed based on the inspiration of feedforward network architecture and graph neural network
in chemistry using hashed bit-type molecular fingerprints
"""
_SavedPredLabels: List[str] = ["Reference", "AIP-BDET: Prediction", "AIP-BDET: Error"]
@MeasureExecutionTime
def __init__(self, DatasetObject: FeatureData, GeneratorObject: DataGenerator, TrainedModelMode: Optional[int] = 1,
InputKey: Optional[int] = None, dataType: np.dtype=np.uint8, TrainedModelInputFileName: str = None,
CFile: str = None, SFile: str = None, dataConfiguration: str = None, gpu_memory: bool = False):
print("-" * 33, "INITIALIZATION", "-" * 33)
from .config import MODEL_INIT, updateDataConfig
# [1.0]: Main Attribute for Data
if not isinstance(DatasetObject, FeatureData) or DatasetObject is None:
InputKey: int = MODEL_INIT[TrainedModelMode][0] if InputKey is None else InputKey
self._dataset: FeatureData = FeatureData(InputKey=InputKey, trainable=False, retraining=False,
dataType=dataType)
else:
if DatasetObject.isTrainable():
raise TypeError("This dataset is invalid")
self._dataset: FeatureData = DatasetObject
super(PredictModel, self).__init__(dataset=DatasetObject, priority_key="Test")
if not isinstance(GeneratorObject, DataGenerator) or GeneratorObject is None:
self._generator: DataGenerator = \
DataGenerator(DatasetObject=self.getDataset(), priorityKey=self.getKey(), boostMode=True,
simplifySmilesEnvironment=True, showInvalidStereochemistry=False,
environmentCatching=False)
else:
if GeneratorObject.getDataset() is not self._dataset:
raise TypeError("This generator is invalid is invalid")
self._generator: DataGenerator = GeneratorObject
self._PrebuiltInfoLabels: List[str] = getPrebuiltInfoLabels()
self._dataset.setRequestLabels(value=self._PrebuiltInfoLabels, request="Info")
# [1.1]: Cache Attribute
self._mol, self._radical, self._bondIndex, self._bondType = self._dataset.getPropertiesRespectToColumn()
self.dataType: np.dtype = self._dataset.dataType
# [1.2]: Main Attribute for Model
if True: # Re-initialize Hyper-parameter
TrainedModel: str = MODEL_INIT[TrainedModelMode][1] if TrainedModelInputFileName is None else TrainedModelInputFileName
CColPath: str = MODEL_INIT[TrainedModelMode][2] if CFile is None else CFile
SColPath: str = MODEL_INIT[TrainedModelMode][3] if SFile is None else SFile
if TrainedModel is not None and isinstance(TrainedModel, str):
TrainedModel: str = FixPath(FileName=TrainedModel, extension=".h5")
dataConfiguration: str = MODEL_INIT[TrainedModelMode][4] if dataConfiguration is None else dataConfiguration
if dataConfiguration is not None:
updateDataConfig(dataConfiguration)
if gpu_memory:
from tensorflow import config
dev = config.list_physical_devices('GPU')
config.experimental.set_memory_growth(dev[0], True)
pass
self._dataset.setTrainedSavedLabels(CTrainedLabelsDirectory=CColPath, STrainedLabelsDirectory=SColPath)
self._generator.setBondTypeForInferenceOrFineTuning(bondTypeSaved=self._getBondTypeSaved_())
# print(TrainedModel)
self.TF_model: AbstractModel = AbstractModel(DatasetObject=self._dataset, model=TrainedModel, sparseMode=False)
# [2]: Model Calculation
self._MolArray: List[str] = []
self._RecordedMolArray: Optional[ndarray] = None
self.y_pred: BASE_TEMPLATE = None
self.InterOutput: BASE_TEMPLATE = None
self.BuiltDataFrame: Optional[pd.DataFrame] = None
self.DensityDataFrame: Optional[pd.DataFrame] = None
# [3]: Status Attribute
self._inplace: bool = True # In-place Standardization
self._isStandardized: bool = False # Prevent Multiple Standardization
self._timer: Dict[str, Union[int, float]] = {'add': 0, 'create': 0, 'process': 0, 'predictMethod': 0,
'predictFunction': 0, 'visualize': 0}
# [0]: Indirect/Hidden Method: -----------------------------------------------------------------------------------
# [0.1]: Preprocessing - Timer: ----------------------------------------------------------------------------------
def _resetTime_(self) -> None:
self._timer: Dict[str, Union[int, float]] = {'add': 0, 'create': 0, 'process': 0, 'predictMethod': 0,
'predictFunction': 0, 'visualize': 0}
def _getFullTime_(self) -> float:
return self._timer['add'] + self._getProcessTime_() + self._timer['visualize']
def _getProcessTime_(self) -> float:
return self._timer['create'] + self._timer['process'] + self._timer['predictMethod']
def exportProcessTime(self) -> pd.DataFrame:
""" Return a DataFrame that recorded execution time """
index = ["#1: Adding Data", "#2: Create Data", "#3: Process Data", "#4: Prediction Method",
"#5: Prediction Speed", "#6: Visualization Speed", "#7: Total Time"]
column = ["Full Timing (secs)", "Timing per Unit (ms/bond)"]
numsSamples: int = self._dataset.getRequestData(requests=("Train", "Info")).shape[0]
value = [[self._timer['add'], 1e3 * self._timer['add'] / numsSamples],
[self._timer["create"], 1e3 * self._timer["create"] / numsSamples],
[self._timer["process"], 1e3 * self._timer["process"] / numsSamples],
[self._timer["predictMethod"], 1e3 * self._timer["predictMethod"] / numsSamples],
[self._timer["predictFunction"], 1e3 * self._timer["predictFunction"] / numsSamples],
[self._timer["visualize"], 1e3 * self._timer["visualize"] / numsSamples],
[self._getFullTime_(), 1e3 * self._getFullTime_() / numsSamples]]
        x = pd.DataFrame(data=value, index=index, columns=column, dtype=np.float32)
        return x
import sys
import pandas as pd
import csv
from ChefRequest import makeRequest
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
starter_problems = {
"0": "CHCHCL",
"1": "TEST",
"2": "INTEST",
"3": "TSORT",
"4": "FCTRL2",
"5": "ATM",
"6": "LADDU",
"7": "START01",
"8": "AMR15A",
"9": "RNDPAIR"
}
def create_soup(x):
return x["tags"] + ' ' + x["author"]
csv_file = "problem_data.csv"
metadata = pd.read_csv(csv_file, low_memory=False)
# Create a new soup feature
metadata["soup"] = metadata.apply(create_soup, axis=1)
count = CountVectorizer(stop_words="english")
count_matrix = count.fit_transform(metadata["soup"])
cosine_sim = cosine_similarity(count_matrix, count_matrix)
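# cosine_sim[i, j] measures how similar problems i and j are, based on the token counts of their
# shared tag/author "soup"; it is the basis for the content-based recommendations below.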
metadata = metadata.reset_index()
indices = pd.Series(metadata.index, index=metadata["code"])
def getRecommendations(code="", contestCode=""):
# Get the index of the problem that matches the problem code
global metadata, count, count_matrix, cosine_sim, indices
if not code:
return starter_problems
idx = indices.get(code, -1)
if idx == -1:
response = makeRequest(
"GET", "https://api.codechef.com/contests/" + contestCode + "/problems/" + code).json()
problem_dict = response.get("result", {}).get(
"data", {}).get("content", {})
data_dict = {}
data_dict["code"] = problem_dict.get("problemCode", "")
data_dict["tags"] = problem_dict.get("tags", [])
data_dict["author"] = problem_dict.get("author", "")
data_dict["solved"] = problem_dict.get("successfulSubmissions", 0)
data_dict["attempted"] = problem_dict.get("totalSubmissions", 0)
data_dict["partiallySolved"] = problem_dict.get("partialSubmissions")
csv_columns = ["code", "tags", "author",
"solved", "attempted", "partiallySolved"]
try:
with open(csv_file, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writerow(data_dict)
except IOError:
print("I/O error")
    metadata = pd.read_csv(csv_file, low_memory=False)
"""ML-Experiments"""
import os
import pandas
from zipfile import ZipFile
class experiment:
def __init__(self, kaggle_api, dataset, dataset_target,
download_directory):
"""Experiment encapsulates a ML experiment
Arguments:
kaggle_api {KaggleApi} -- Instance of KaggleApi
dataset {str} -- <owner/resource>
dataset_target {str} -- <filename>.<ext>
download_directory {str} -- Path to place the downloaded dataset (relative to $VIRTUAL_ENV)
"""
self.kaggle_api = kaggle_api
self.dataset = dataset
self.dataset_target = dataset_target
self.download_directory = os.path.join(os.environ["VIRTUAL_ENV"],
download_directory)
self.dataset_file = os.path.join(self.download_directory,
dataset_target)
self.df = self.initialize_dataframe()
def initialize_dataframe(self):
"""Initialize a DataFrame from a Kaggle dataset"""
if not os.path.exists(self.dataset_file):
self.kaggle_api.dataset_download_file(
self.dataset,
self.dataset_target,
path=self.download_directory)
extract_target = f"{self.dataset_file}.zip"
ZipFile(extract_target).extractall(self.download_directory)
os.unlink(extract_target)
        return pandas.read_csv(self.dataset_file)
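# Minimal usage sketch (requires configured Kaggle API credentials; the dataset slug and file name
# below are illustrative and must point at an existing Kaggle dataset):
# from kaggle.api.kaggle_api_extended import KaggleApi
# api = KaggleApi()
# api.authenticate()
# exp = experiment(api, "uciml/iris", "Iris.csv", "data")
# print(exp.df.head())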
import pandas as pd
import os
#
from .... import global_tools, global_var
from . import paths, transcode
def load(map_code = None):
"""
Loads the production data provided by ENTSO-E
in the given delivery zone.
:param map_code: The bidding zone
:type map_code: string
:return: The production data
:rtype: pd.DataFrame
"""
df_path = paths.fpath_tmp.format(map_code = map_code) + '.csv'
try:
print('Load production/entsoe - ', end = '')
df = pd.read_csv(df_path,
header = [0],
sep = ';',
)
        df.loc[:, global_var.production_dt_UTC] = pd.to_datetime(df[global_var.production_dt_UTC])
        return df
    except FileNotFoundError:
        # The fallback branch (recomputing the cached file from the raw ENTSO-E data) is not shown
        # in this excerpt, so the missing cache is simply reported and re-raised here.
        print('cached file not found')
        raise
import logging
import os
import typing as t
from glob import glob
from pathlib import Path
import pandas as pd
from keras.models import load_model
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from neural_network_model import model as m
from neural_network_model.config import config
_logger = logging.getLogger(__name__)
def load_single_image(data_folder: str, filename: str) -> pd.DataFrame:
"""Makes dataframe with image path and target."""
image_df = []
# search for specific image in directory
for image_path in glob(os.path.join(data_folder, f'{filename}')):
tmp = pd.DataFrame([image_path, 'unknown']).T
image_df.append(tmp)
# concatenate the final df
images_df = pd.concat(image_df, axis=0, ignore_index=True)
images_df.columns = ['image', 'target']
return images_df
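# Example (folder and file name are illustrative): load_single_image('data/test', 'leaf_001.png')
# returns a one-row dataframe with columns ['image', 'target'], where target is 'unknown' because a
# single image supplied this way carries no class label.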
def load_image_paths(data_folder: str) -> pd.DataFrame:
"""Makes dataframe with image path and target."""
images_df = []
# navigate within each folder
for class_folder_name in os.listdir(data_folder):
class_folder_path = os.path.join(data_folder, class_folder_name)
# collect every image path
for image_path in glob(os.path.join(class_folder_path, "*.png")):
            tmp = pd.DataFrame([image_path, class_folder_name]).T
            images_df.append(tmp)

    # concatenate the final df
    images_df = pd.concat(images_df, axis=0, ignore_index=True)
    images_df.columns = ['image', 'target']

    return images_df
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn.cluster import MiniBatchKMeans
def process_am(x):
aa = ''
if type(x) == pd.core.series.Series:
x = x.values
aa = [aa + x[i] for i in range(len(x))]
aa = aa[0]
aa = re.sub('"'," ", aa)
elif type(x) == str:
aa = x
aa = re.sub('"'," ", aa)
aal = []
_aal = aa.split(',')
for aa in _aal:
aa = re.sub("{"," ", aa)
aa = re.sub("}"," ", aa)
aa = re.sub(","," ", aa)
aa = re.sub(":"," ", aa)
aa = re.sub('’n',"", aa)
aa = aa.strip()
aa = re.sub('\s+',"_", aa)
aa = aa.lower()
if len(aa)>0:
aal.append(aa)
return dict.fromkeys(set(aal), 1)
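# Worked example (input format assumed from the Airbnb 'amenities' column):
# process_am('{"TV","Wireless Internet","Kitchen"}')
# -> {'tv': 1, 'wireless_internet': 1, 'kitchen': 1}
# Quotes and braces are stripped, each amenity is lower-cased with whitespace collapsed to '_', and
# every key maps to 1 so that DictVectorizer can one-hot encode the amenities further down.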
def perc2float(x):
return float(x.strip('%'))/100
########################
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
print("train:",train.shape)
print("test:",test.shape)
# 1. log_price
print("1. log_price")
y_train = train['log_price']
train = train.drop(['log_price'],axis=1)
assert train.shape[1] == test.shape[1]
for i in range(train.shape[1]):
assert train.columns[i] == test.columns[i]
train_obs = len(train)
all_data = pd.concat([train,test],axis=0)
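# Train and test are stacked so that label encodings and the amenities vocabulary are fitted on the
# full data; train_obs (stored above) is kept so the combined frame can be split back afterwards.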
# 2. property_type, room_type, bed_type
print('--------------> Feature Engineering ... ')
print("2. property_type, room_type, bed_type")
encoder = LabelEncoder()
encoder.fit(all_data['property_type'])
all_data['property_type'] = encoder.transform(all_data['property_type'])
all_data['room_type'] = all_data['room_type'].map( {'Entire home/apt':5, 'Private room':3, 'Shared room':1})
all_data.bed_type = all_data.bed_type.fillna('missing')
encoder = LabelEncoder()
encoder.fit(all_data['bed_type'])
all_data['bed_type'] = encoder.transform(all_data['bed_type'])
# 3. amenities
print("3. amenities")
am_list = [process_am( all_data.iloc[i]['amenities']) for i in range(len(all_data))]
assert len(am_list) == len(all_data)
v = DictVectorizer(sparse=False)
X = v.fit_transform(am_list)
amenities_df = pd.DataFrame(data=X,columns=v.feature_names_)
amenities_df.index = all_data.index
all_data = pd.concat([all_data,amenities_df],axis=1)
all_data = all_data.drop(['amenities'],axis=1)
del amenities_df
#4. accommodates , bathrooms
#5. cancellation_policy, cleaning_fee
print("5. cancellation_policy, cleaning_fee")
all_data['cancellation_policy'] = all_data['cancellation_policy'].map( {
'super_strict_60':20,
'super_strict_30':30,
'strict':50,
'moderate':10,
'flexible':5,
'long_term':1,
})
all_data['cleaning_fee'] = all_data['cleaning_fee'].map( {
True:1,
False:0
})
# 6. city
print("6. city")
encoder = LabelEncoder()
encoder.fit(all_data['city'])
all_data['city'] = encoder.transform(all_data['city'])
# 7. description TODO
print("7. description ... TODO")
all_data['description'] = all_data['description'].fillna('')
all_data = all_data.drop(['description'],axis=1)
# 8. first_review , last_review , number_of_reviews , review_scores_rating
print("7. 8. first_review , last_review , number_of_reviews , review_scores_rating ... TODO better")
most_recent_review = pd.to_datetime(all_data.last_review).max()
delta_last_review = most_recent_review - pd.to_datetime(all_data.last_review)