prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90)
---|---|---|
from .base import Transformer
import pandas as pd
import numpy as np
import os
ISO_COUNTRY_CODES = os.path.join(os.path.dirname(__file__), 'countrycodes.csv')
class UCDPTransformer(Transformer):
""" Data source specific transformers """
def __init__(self, source, target):
super().__init__(source, target)
self.iso = pd.read_csv(ISO_COUNTRY_CODES,
usecols=[0, 2],
names=['name', 'iso3'],
header=0)
def read(self):
try:
self.ucdp_df = pd.read_csv(self.source[0])
except FileNotFoundError as exc:
raise ValueError("Source file {} not found.".format(self.source)) \
from exc
def write(self):
self.df.to_csv(self.target, mode='w', index=False)
def transform(self):
# self.transform_forcibly_displaced_populations()
self.transform_ucdp()
self.transform_country_code()
def __repr__(self):
return "<UCDPTransformer data for {}-{} ({} rows)>".format(self.df['year'].min(),
self.df['year'].max(),
len(self.df))
def transform_ucdp(self):
events_df = self.ucdp_df.groupby(['year', 'country'])['best'].agg(['size'])
events_df.reset_index(inplace=True)
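# build a full country x year grid (cross join on a dummy key) so that country/year combinations with no events survive the left merges below and can be filled with 0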
unique_states = events_df.drop_duplicates("country")[["country"]]
unique_states["key"] = 1
unique_years = events_df.drop_duplicates("year")[["year"]]
unique_years["key"] = 1
states_years_df = pd.merge(unique_states, unique_years, on = "key").drop("key",axis=1)
events_df = pd.merge(states_years_df, events_df, how = "left", on = ["country","year"]).fillna(0)
events_df.columns.values[2] = 'value'
subevents_df = self.ucdp_df.groupby(['year', 'country', 'type_of_violence'])['best'].agg(['size'])
subevents_df.reset_index(inplace=True)
state_based_df = subevents_df[subevents_df['type_of_violence'] == 1][['year', 'country', 'size']]
state_based_df = pd.merge(states_years_df, state_based_df, how = "left", on = ["country","year"]).fillna(0)
state_based_df.columns.values[2] = 'value'
nonstate_df = subevents_df[subevents_df['type_of_violence'] == 2][['year', 'country', 'size']]
nonstate_df = pd.merge(states_years_df, nonstate_df, how = "left", on = ["country","year"]).fillna(0)
nonstate_df.columns.values[2] = 'value'
one_sided_df = subevents_df[subevents_df['type_of_violence'] == 3][['year', 'country', 'size']]
one_sided_df = pd.merge(states_years_df, one_sided_df, how = "left", on = ["country","year"]).fillna(0)
one_sided_df.columns.values[2] = 'value'
rakhine_df = self.ucdp_df[self.ucdp_df['adm_1'] == "Rakhine State"]
myanmar_df = rakhine_df.groupby(['year'])['best'].agg(['size'])
myanmar_df.reset_index(inplace=True)
myanmar_df = pd.merge(unique_years, myanmar_df, how = "left", on = ["year"]).fillna(0).drop("key", axis=1)
myanmar_df.columns.values[1] = 'value'
myanmar_df["country"] = "Myanmar"
events_df.loc[:, "Indicator Code"] = "UC.EVT.TOT"
events_df.loc[:, "Indicator Name"] = "Number of conflict events per year"
state_based_df.loc[:, "Indicator Code"] = "UC.EVT.STA"
state_based_df.loc[:, "Indicator Name"] = "Number of state-based conflict events per year"
nonstate_df.loc[:, "Indicator Code"] = "UC.EVT.NON"
nonstate_df.loc[:, "Indicator Name"] = "Number of non-state conflict events per year"
one_sided_df.loc[:, "Indicator Code"] = "UC.EVT.ONE"
one_sided_df.loc[:, "Indicator Name"] = "Number of one-sided conflict events per year"
myanmar_df.loc[:, "Indicator Code"] = "UC.EVT.RAKH"
myanmar_df.loc[:, "Indicator Name"] = "Number of conflict events in Rakhine State per year"
events_fatal = self.ucdp_df.groupby(['year', 'country'])['best'].agg(['sum'])
events_fatal.reset_index(inplace=True)
events_fatal = pd.merge(states_years_df, events_fatal, how = "left", on = ["country","year"]).fillna(0)
events_fatal.columns.values[2] = 'value'
civil_fatal = self.ucdp_df.groupby(['year', 'country'])['deaths_civilians'].agg(['sum'])
civil_fatal.reset_index(inplace=True)
civil_fatal = pd.merge(states_years_df, civil_fatal, how = "left", on = ["country","year"]).fillna(0)
civil_fatal.columns.values[2] = 'value'
subevents_fatal = self.ucdp_df.groupby(['year', 'country', 'type_of_violence'])['best'].agg(['sum'])
subevents_fatal.reset_index(inplace=True)
state_based_fatal = subevents_fatal[subevents_df['type_of_violence'] == 1][['year', 'country', 'sum']]
state_based_fatal = | pd.merge(states_years_df, state_based_fatal, how = "left", on = ["country","year"]) | pandas.merge |
import pandas as pd
import sqlite3
from datetime import datetime, timedelta, date
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
import selectStock_datetime
def scaler(result_df:pd.DataFrame) -> pd.DataFrame:
"""
date๋ฅผ ์ ์ธํ ๋๋จธ์ง ์ปฌ๋ผ 0๊ณผ 1์ฌ์ด๋ก ์ ๊ทํํ๋ ํจ์
result_df : ์ ๊ทํํ ๋ฐ์ดํฐ ํ๋ ์ ๋ฐ์ดํฐ
"""
date_c = list(result_df['date'])
x = result_df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
result_df = pd.DataFrame(x_scaled, columns=result_df.columns)
result_df['date'] = date_c
return result_df
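# Illustrative use of scaler() (hypothetical values, not part of the original pipeline; every
# column, including 'date', must be numeric because MinMaxScaler runs over result_df.values):
#   demo = pd.DataFrame({'date': [20210104, 20210105, 20210106],
#                        'senti': [0.2, 0.8, 0.5],
#                        'UpDown': [-1.3, 2.1, 0.4]})
#   scaler(demo)  # 'senti' and 'UpDown' are rescaled to [0, 1]; 'date' is restored unchanged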
def service(code:str, start_day:int, end_day:int, period:int, drop_holi = 0, stock_moving_avg = 1, day_shift = 0) -> pd.DataFrame:
'''
Takes the options below, queries the DB for the corresponding period, processes the data
into the desired form, and returns it.
-- Options --
code : name of the stock to query
start_day : query start date
end_day : query end date
period : window (in days) over which to moving-average the news sentiment and the stock price
drop_holi : whether to use weekend/holiday news. 0 (default) - fill with the next business day's stock price / 1 - drop weekend and holiday data
stock_moving_avg : whether to moving-average the stock price. 1 (default) - use the moving average / 0 - do not use it
day_shift : lag (in days) between news and stock price. 0 (default) | +x - analyze a day's news against a later day's price | -x - analyze a day's news against an earlier day's price
-- Returns --
result_df : cleaned DataFrame of date / sentiment / stock price movement
all_keyword : keywords over the whole queried period
pos_keyword : keywords from days on which the stock price rose
neg_keyword : keywords from days on which the stock price fell
df_length : length of the DataFrame for the queried period
'''
# set the DB query start date, accounting for the moving-average window
inq_day = (datetime.strptime(str(start_day), "%Y%m%d").date() - timedelta(days = period - 1)).strftime('%Y%m%d')
end_day = str(end_day)
# the DB path must be adjusted to the local environment
conn = sqlite3.connect("DB/2jo.db")
# bind a cursor
c = conn.cursor()
# query the news data
# query = c.execute(f"select a.id, a.date, a.code, b.senti, b.senti_proba from news_db b join news_id a on b.id = a.id where a.date BETWEEN {inq_day} and {end_day};")
query = c.execute(f"select a.id, a.date, a.code, b.keyword, b.senti, b.senti_proba from news_db b join news_id a on b.id = a.id where a.code = \'{code}\' and (a.date BETWEEN {inq_day} and {end_day});")
# get the column names
cols = [column[0] for column in query.description]
# build a DataFrame
news_result_df = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)
# build the keyword DataFrame up front
keyword_result_df = news_result_df.groupby('date')['keyword'].apply(lambda x: x.sum())
# close the cursor - commented out for now
# conn.close()
# query the stock price data
query = c.execute(f"select s_date, s_code, f_rate from stock_db where s_code = \'{code}\' and (s_date BETWEEN {inq_day} and {end_day});")
# get the column names
cols = [column[0] for column in query.description]
# build a DataFrame
stock_result_df = pd.DataFrame.from_records(data=query.fetchall(), columns=cols)
stock_result_df.rename(columns={'s_date': 'date', 's_code': 'code', 'f_rate': 'UpDown'}, inplace=True)
# DataFrame length (returned to the caller)
df_length = len(stock_result_df)
# whether to drop weekends and holidays depends on the option; the default keeps them
if drop_holi:
# drop days whose stock price is missing because of weekends or holidays
merge_outer_df = pd.merge(news_result_df,stock_result_df, how='outer',on='date')
merge_outer_df = merge_outer_df.dropna(subset=['UpDown'])
else:
# fill days whose stock price is missing (weekends, holidays, etc.) with the next business day's price
merge_outer_df = | pd.merge(news_result_df,stock_result_df, how='outer',on='date') | pandas.merge |
import pandas as pd
import os
import json
import numpy as np
from datetime import datetime, date
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
from flask import Flask, jsonify, render_template, redirect, url_for, request
from flask_sqlalchemy import SQLAlchemy
# might be able to remove
import pymysql
# additional packages might be able to remove
from sqlalchemy import Table
from sqlalchemy import Column, Integer, Text
import csv
# project 2 config file
from config import pw
# gp config file
from db_config import pwd
# prediction apps
from FirstPrediction import FirstPredict, recipe_info, FinalPredict
from secondPredict import SecondPredict
df = pd.read_csv('wine_pairings_v7.csv', index_col = 'wine')
app = Flask(__name__, template_folder="templates")
app.config["SQLALCHEMY_DATABASE_URI"] = (
os.environ.get("JAWSDB_URL", "")
or f"mysql+pymysql://root:{pwd}@127.0.0.1:3306/wine_db"
)
db = SQLAlchemy(app)
session = db.session
Base = automap_base()
Base.prepare(db.engine, reflect=True)
wine_data = Base.classes.wine_data
wine_map_data = Base.classes.world_wine_data
wine_blurbs = Base.classes.wine_blurbs
@app.route("/")
def index():
result = render_template("index.html")
return result
@app.route("/grape_guide", methods = ["GET","POST"])
def wine():
wine_prediction = "blank"
if request.method == "POST":
wine_prediction = request.form["wine-selection"]
print(wine_prediction)
result = render_template("grape_guide.html", wine_selection = wine_prediction)
return result
@app.route("/wine_data/<wine>")
def get_wine_data(wine):
qry = (
session.query("* from wine_data;")
.statement
)
df = pd.read_sql_query(qry, db.engine).drop(columns = "ID")
df = df.loc[df[wine]> 0]
data = {
"Wine_Name": pd.DataFrame(df[wine]).columns.values.tolist(),
"Attribute_Labels": np.array(pd.DataFrame(df["Attributes"]).values).flatten().tolist(),
"Attribute_Values": np.array(pd.DataFrame(df[wine]).values).flatten().tolist()
}
return jsonify(data)
@app.route("/wine_blurb/<wine>")
def get_wine_blurb(wine):
qry = session.query("*").filter(wine_blurbs.wine == wine).statement
df = | pd.read_sql_query(qry, db.engine) | pandas.read_sql_query |
import json
import re
import time
import pickle  # used below to save the trained model
import app_settings
from collections import Counter
import matplotlib.pyplot as plt
from gensim.models import Doc2Vec
from gensim.models import Word2Vec
from gensim.models.doc2vec import TaggedDocument
# import nltk
# nltk.download('punkt')
from sklearn.manifold import TSNE
onlyfiles = app_settings.MPD_FILENAMES
file_count=1000
train_data = onlyfiles[:file_count]
test_data = onlyfiles[file_count:int(file_count+file_count*0.2)]
counted = Counter()
def extract_playlist_info(playlist):
name = playlist["name"]
songs = playlist["tracks"]
song_ids = [str(s["track_uri"]) for s in songs]
# song_ids = [str(s["album_uri"]) for s in songs]
song_names = [str(s["track_name"]) for s in songs]
# song_ids = " ".join(song_ids)
# tokenized_text = word_tokenize(song_ids)
return (name+" "+str(playlist["pid"]), (song_names, song_ids))
def searchForId(playlists):
id="spotify:track:0mt02gJ425Xjm7c3jYkOBn"
for p in playlists:
pl = extract_playlist_info(p)
if id in pl[1][1]:
print("yeaaaahhhh")
def generator_playlists(nr_of_files):
for f in onlyfiles[:nr_of_files]:
print(f)
fullPath = app_settings.MPD_PATH + str(f)
data = json.load(open(fullPath))
playlists=[]
searchForId(data["playlists"])
for playlist in data["playlists"]:
pl=extract_playlist_info(playlist)
onlyIds=pl[1][1]
# onlyIds=" ".join(onlyIds)
yield onlyIds
def data_to_word2vec(files,dimension,max_vocab_size):
gen=generator_playlists(files)
model=Word2Vec(size=dimension,min_count=1,max_vocab_size=max_vocab_size)
model.build_vocab(gen)
# print(model.wv['spotify:track:0mt02gJ425Xjm7c3jYkOBn'])
gen = generator_playlists(files)
model.train(gen,total_examples=model.corpus_count,epochs=model.epochs)
# model = Word2Vec(gen, min_count=1)
# print(model.wv.vocab['spotify:track:0mt02gJ425Xjm7c3jYkOBn'])
# print(model.wv.most_similar(positive="spotify:track:0mt02gJ425Xjm7c3jYkOBn"))
return model
#retruns list of all playlists in file
def get_file_contents(files):
for f in onlyfiles[:files]:
print(f)
fullPath = app_settings.MPD_PATH + str(f)
data = json.load(open(fullPath))
# playlists=[]
pls= [pl for pl in data["playlists"]]
yield pls
# data_to_word2vec()
def get_docs(files):
for f in onlyfiles[:files]:
print(f)
fullPath = app_settings.MPD_PATH + str(f)
data = json.load(open(fullPath))
# playlists=[]
for playlist in data["playlists"]:
pl=extract_playlist_info(playlist)
# playlists.append(pl)
pl=pl[1][1]
pl=" ".join(pl)
yield pl
# class tagged_docs:
# def __iter__(self):
# for f in files:
# print(f)
# fullPath = PATH + str(f)
# data = json.load(open(fullPath))
# for playlist in data["playlists"]:
# name = playlist["name"]
# songs = playlist["tracks"]
# song_ids = [str(s["track_uri"]) for s in songs]
# # song_ids = " ".join(song_ids)
# # tokenized_text = word_tokenize(song_ids)
# doc = TaggedDocument( song_ids,[name])
# yield doc
#
def trainModel(files):
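# each playlist is wrapped in a TaggedDocument whose words are the playlist's track URIs and
# whose tags are [name + " " + pid, name], so Doc2Vec learns one vector per playlist (and per name)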
class tagged_docs:
def __iter__(self):
for f in files:
print(f)
fullPath = app_settings.MPD_PATH + str(f)
data = json.load(open(fullPath))
for playlist in data["playlists"]:
name = playlist["name"]
# name+="-"+str(playlist["pid"])+" "+name
name=[name+" "+str(playlist["pid"]),name]
songs = playlist["tracks"]
song_ids = [str(s["track_uri"]) for s in songs]
# song_ids = " ".join(song_ids)
# tokenized_text = word_tokenize(song_ids)
doc = TaggedDocument( song_ids,name)
yield doc
# pprint(data)
# counted+=readTitles.titles_to_list(data)
# print(counted)
cnt = Counter(counted)
# docs=memorySavingReader.tagged_docs(data)
docs = tagged_docs()
# model = Doc2Vec(docs, vector_size=100, window=8, min_count=5, workers=4)
vecSize=300
window=2
model=Doc2Vec(vector_size=vecSize, window=window,workers=8)
model.build_vocab(docs)
start = time.clock()
print(model.iter)
model.train(docs, total_examples=model.corpus_count, epochs=model.iter)
print("training took", time.clock() - start)
# model.train(docs)
cnt = Counter(counted)
print(cnt)
#model.save("fullModeD2V"+str(vecSize)+"D"+str(window)+"W")
f=open("fullModeD2V"+str(vecSize)+"D"+str(window)+"W","wb")
pickle.dump(model,f)
return model
def testModel(files,model):
for f in files:
fullPath = app_settings.MPD_PATH + str(f)
data = json.load(open(fullPath))
for playlist in data["playlists"]:
pid=playlist["pid"]
name = playlist["name"]
songs = playlist["tracks"]
song_ids = [str(s["track_uri"]) for s in songs]
# song_ids=" ".join(song_ids)
# tokenized_text = word_tokenize(song_ids)
# inferred_vector = model.infer_vector(TaggedDocument("","Chill"))
# print(model.docvecs.most_similar(positive=['Chill'], negative=[], topn=10))
inferred_vector = model.infer_vector(song_ids[0])
inferred_vector = model.infer_vector(TaggedDocument(song_ids[0],name))
inferred_vector2 = model.infer_vector(song_ids[0])
inferred_vector3 = name
sims = model.docvecs.most_similar([inferred_vector],topn=10)
print(name)
import compare
for tup in sims:
try:
mostSimilarList=int(str(tup[0]).split(" ")[-1])
compare.compare_pids(pid,mostSimilarList)
except:
print(tup)
print("song and name")
print(sims)
print("only song")
print(model.docvecs.most_similar([inferred_vector2],topn=10))
print("only name")
tags = model.docvecs.doctags
if name not in tags:
print("name not in tags")
else:
print(model.docvecs.most_similar(name,topn=10))
# model=trainModel(onlyfiles)
# model=Doc2Vec.load("fullModel")
# print("loading done")
# testModel(onlyfiles,model)
def firstn(arr):
for a in arr:
yield a
def plot(data):
fig, ax = plt.subplots()
for twoDimVec in data:
ax.scatter(twoDimVec[0], twoDimVec[1])
print("show")
plt.show()
def tsne_plot(model):
vocab = list(model.docvecs.doctags)[:1000]
X = model.docvecs[vocab]
tsne = TSNE(n_components=2)
print("try to fit transfo tsne")
X_tsne = tsne.fit_transform(X[:1000])
print("transfo done")
import pandas as pd
df = | pd.DataFrame(X_tsne, index=vocab, columns=['x', 'y']) | pandas.DataFrame |
import re
from collections import defaultdict
import pandas as pd
from scipy import stats
import numpy as np
from rdflib import Namespace, Literal
from brickschema.namespaces import BRICK, A, OWL
# from brickschema.inference import BrickInferenceSession
from brickschema.inference import OWLRLAllegroInferenceSession
from brickschema.graph import Graph
import resolve_ui as ui
import distance
import recordlinkage
from recordlinkage.base import BaseCompareFeature
def graph_from_triples(triples):
g = Graph(load_brick=True)
# g.load_file("ttl/owl.ttl")
g.add(*triples)
sess = OWLRLAllegroInferenceSession()
return sess.expand(g)
def tokenize_string(s):
s = s.lower()
s = re.split(r'-| |_|#|/|:', s)
return s
def compatible_classes(graph, c1, c2):
"""
Returns true if the two classes are compatible (equal, or one is a subclass
of another), false otherwise
"""
q1 = f"ASK {{ <{c1}> owl:equivalentClass?/rdfs:subClassOf*/owl:equivalentClass? <{c2}> }}"
q2 = f"ASK {{ <{c2}> owl:equivalentClass?/rdfs:subClassOf*/owl:equivalentClass? <{c1}> }}"
return graph.query(q1)[0] or graph.query(q2)[0]
def trim_prefix_tokenized(names):
if len(names) <= 1:
return names
max_length = max(map(len, names))
pfx_size = 1
# increase pfx_size until it doesn't match, then reduce by 1 and trim
while pfx_size <= max_length:
pfx = names[0][:pfx_size]
if not all(map(lambda x: x[:pfx_size] == pfx, names[1:])):
pfx_size -= 1
return list([x[pfx_size:] for x in names])
pfx_size += 1
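# e.g. trim_prefix_tokenized([['bldg1', 'ahu', '1'], ['bldg1', 'ahu', '2']]) drops the shared
# ['bldg1', 'ahu'] prefix and returns [['1'], ['2']] (hypothetical labels for illustration)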
# def trim_common_prefix(names):
# if len(names) <= 1:
# return names
# max_length = max(map(len, names))
# min_length = min(map(len, names))
# pfx_size = max_length
# while True:
# pfx = names[0][:pfx_size]
# # if prefix is common, we return
# if not all(map(lambda x: x[:pfx_size] == pfx, names[1:])):
# pfx_size = int(pfx_size / 2) + 1
#
# return list([x[:pfx_size] for x in names])
# ignore_brick_classes = [BRICK.Sensor, BRICK
class VectorJaccardCompare(BaseCompareFeature):
def _compute_vectorized(self, s1, s2):
s1 = list(s1)
s2 = list(s2)
sim = np.array([1-distance.jaccard(s1[i], s2[i])
for i in range(len(s1))])
return sim
class MaxLevenshteinMatch(BaseCompareFeature):
def _compute_vectorized(self, s1, s2):
# calculate pair-wise Jaccard distances (despite the class name, this uses distance.jaccard, not Levenshtein)
s1 = list(s1)
s2 = list(s2)
sim = np.array([distance.jaccard(s1[i], s2[i])
for i in range(len(s1))])
min_dist = np.min(sim)
sim = np.array([1 if x == min_dist and x > .8 else 0 for x in sim])
return sim
def cluster_on_labels(graphs):
# populates the following list; contains lists of URIs that are linked to
# be the same entity
clusters = []
# list of clustered entities
clustered = set()
datasets = []
for source, graph in graphs.items():
df = pd.DataFrame(columns=['label', 'uris'])
print(f"{'-'*5} {source} {'-'*5}")
res = graph.query("SELECT ?ent ?lab WHERE { \
{ ?ent rdf:type/rdfs:subClassOf* brick:Equipment } \
UNION \
{ ?ent rdf:type/rdfs:subClassOf* brick:Point } \
UNION \
{ ?ent rdf:type/rdfs:subClassOf* brick:Location } \
?ent brick:sourcelabel ?lab }")
# TODO: remove common prefix from labels?
labels = [tokenize_string(str(row[1])) for row in res
if isinstance(row[1], Literal)]
# labels = [l for l in labels if l != ["unknown"]]
labels = trim_prefix_tokenized(labels)
uris = [row[0] for row in res if isinstance(row[1], Literal)]
df['label'] = labels
df['uris'] = uris
datasets.append(df)
print("lengths", [len(df) for df in datasets])
if len(datasets) <= 1:
return clusters, clustered
indexer = recordlinkage.Index()
indexer.full()
candidate_links = indexer.index(*datasets)
comp = recordlinkage.Compare()
comp.add(VectorJaccardCompare('label', 'label', label='y_label'))
features = comp.compute(candidate_links, *datasets)
# use metric of '>=.9' because there's just one feature for now and it
# scales [0, 1]
matches = features[features.sum(axis=1) >= .9]
for idx_list in matches.index:
pairs = zip(datasets, idx_list)
entities = [ds['uris'].iloc[idx] for ds, idx in pairs]
for ent in entities:
clustered.add(str(ent))
clusters.append(entities)
return clusters, clustered
def cluster_on_type_alignment(graphs, clustered):
clusters = []
counts = defaultdict(lambda: defaultdict(set))
uris = {}
for source, graph in graphs.items():
res = graph.query("SELECT ?ent ?type ?lab WHERE { \
?ent rdf:type ?type .\
{ ?type rdfs:subClassOf+ brick:Equipment } \
UNION \
{ ?type rdfs:subClassOf+ brick:Point } \
UNION \
{ ?type rdfs:subClassOf+ brick:Location } \
?ent brick:sourcelabel ?lab }")
for row in res:
entity, brickclass, label = row
if entity in clustered:
continue
counts[brickclass][source].add(str(label))
uris[str(label)] = entity
for bc, c in counts.items():
mode_count = stats.mode([len(x) for x in c.values()]).mode[0]
candidates = [(src, list(ents)) for src, ents in c.items()
if len(ents) == mode_count]
if len(candidates) <= 1:
continue
print(f"class {bc} has {len(c)} sources with {mode_count} candidates each")
# short-circuit in the common case
if mode_count == 1:
cluster = [uris[ents[0]] for _, ents in candidates]
if cluster not in clusters:
clusters.append(cluster)
continue
datasets = [ | pd.DataFrame({'label': ents, 'uris': [uris[x] for x in ents]}) | pandas.DataFrame |
import re
import pandas
rule = 'ใ€€+็ฌฌ([^ๆก]{1,5})ๆกใ€€(.*)'
chapter = '็ฌฌ[ไธ€ไบŒไธ‰ๅ››ไบ”ๅ…ญไธƒๅ…ซไนๅ]{1,3}ๅˆ†?[็ซ ็ผ–]'
pattern = re.compile(rule)
chapter_pattern = re.compile(chapter)
FORMAL_DIGIT="้›ถไธ€ไบŒไธ‰ๅ››ไบ”ๅ…ญไธƒๅ…ซไน"
math_digit="0123456789"
def format2digit(word):
trans = ""
if word.startswith('ๅ'):
trans += '1'
for c in word:
if c in FORMAL_DIGIT:
trans += math_digit[FORMAL_DIGIT.index(c)]
if word.endswith(c):
if c=="ๅ":
trans += '0'
if c=="็™พ":
trans += '00'
return trans
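# e.g. (assuming the numeral strings reconstructed above) format2digit("ไบŒๅไธ‰") -> "23",
# format2digit("ๅไบ”") -> "15", format2digit("ไธ‰็™พ") -> "300"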
df = | pandas.DataFrame() | pandas.DataFrame |
# FT_connect_functions
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform, registration
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
from shutil import copyfile
from scipy.spatial import distance
from FT_connect_config import *
def genDisplacement(filename_t0,filename_t1):
global pathToSegs
temp1 = np.asarray(np.load(filename_t0,allow_pickle=True)).item()
imfilename1 = temp1['filename'].split('/')[-1]
img1 = io.imread(pathToSegs+imfilename1);
temp2 = np.asarray(np.load(filename_t1,allow_pickle=True)).item()
imfilename2 = temp2['filename'].split('/')[-1]
img2 = io.imread(pathToSegs+imfilename2);
shift_vector = registration.phase_cross_correlation(img1, img2)
return(shift_vector[0])
def buildFeatureFrame(filename_t0,timepoint,pathtoimage="./"):
temp = np.asarray(np.load(filename_t0,allow_pickle=True)).item()
imfilename = temp['filename'].split('/')[-1]
img = io.imread(pathtoimage+imfilename);
masks = clear_border(temp['masks'])
image_props = measure.regionprops_table(masks,
intensity_image=img,
properties=('label','area', 'centroid', 'bbox','mean_intensity'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
def expandBoundBox(FeatureFrame, expansion = 2):
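# grow each bounding box by `expansion` times its larger half-dimension, clamping the expanded box to the frame bounds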
hf_row = np.ceil((FeatureFrame['bbox-3']-FeatureFrame['bbox-1'])/2)
hf_col = np.ceil((FeatureFrame['bbox-2']-FeatureFrame['bbox-0'])/2)
maxes = expansion*np.amax(np.vstack((hf_row,hf_col)).T,axis=1).astype(int)
FeatureFrame['ebox-0'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['bbox-0']-maxes)).T,axis=1).astype(int)
FeatureFrame['ebox-1'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['bbox-1']-maxes)).T,axis=1).astype(int)
FeatureFrame['ebox-2'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['bbox-2']),FeatureFrame['bbox-2']+maxes)).T,axis=1).astype(int)
FeatureFrame['ebox-3'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['bbox-3']),FeatureFrame['bbox-3']+maxes)).T,axis=1).astype(int)
return(FeatureFrame)
def futureBoundBox(FeatureFrame,shiftVector):
FeatureFrame['fbox-0'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['ebox-0'] - shiftVector[1] )).T,axis=1).astype(int)
FeatureFrame['fbox-1'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['ebox-1'] - shiftVector[0] )).T,axis=1).astype(int)
FeatureFrame['fbox-2'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['ebox-2']),FeatureFrame['ebox-2'] - shiftVector[1] )).T,axis=1).astype(int)
FeatureFrame['fbox-3'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['ebox-3']),FeatureFrame['ebox-3'] - shiftVector[0] )).T,axis=1).astype(int)
return(FeatureFrame)
def expectedLocation(FeatureFrame,shiftVector):
FeatureFrame['fcentroid-0'] = FeatureFrame['centroid-0'] - shiftVector[1]
FeatureFrame['fcentroid-1'] = FeatureFrame['centroid-1'] - shiftVector[0]
return(FeatureFrame)
def genCandidateNodes(FeatureFrame_t0, FeatureFrame_t1):
candidates = (((np.asarray(FeatureFrame_t1['centroid-0'])[:,None]>=np.asarray(FeatureFrame_t0['fbox-0']))&
(np.asarray(FeatureFrame_t1['centroid-0'])[:,None]<np.asarray(FeatureFrame_t0['fbox-2']))&
(np.asarray(FeatureFrame_t1['centroid-1'])[:,None]>=np.asarray(FeatureFrame_t0['fbox-1']))&
(np.asarray(FeatureFrame_t1['centroid-1'])[:,None]<np.asarray(FeatureFrame_t0['fbox-3']))))
return(candidates)
def getDifference(FeatureFrame_t0, FeatureFrame_t1, feature="position",normed=True):
if (feature == "position"):
delta0 = (np.asarray(FeatureFrame_t1['centroid-0'])[:,None]-np.asarray(FeatureFrame_t0['fcentroid-0']))
delta1 = (np.asarray(FeatureFrame_t1['centroid-1'])[:,None]-np.asarray(FeatureFrame_t0['fcentroid-1']))
result = np.sqrt(delta0**2 + delta1**2 )
else :
result = np.abs(np.asarray(FeatureFrame_t1[feature])[:,None]-np.asarray(FeatureFrame_t0[feature]))
if normed:
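# divide by the largest power of ten not exceeding the maximum so that different features end up on comparable scales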
result = result/10**np.floor(np.log10(np.max(result)))
return(result)
def DivSizeScore(mom_area, sis_area_1, sis_area_2):
global DivSizeDiffThreshold,DivSizeScoreReturn,DivSizeRatio_Min,DivSizeRatio_Max
areaRatio = (mom_area / (sis_area_1 + sis_area_2))
diffArea = np.abs(sis_area_1 - sis_area_2)/np.sqrt(sis_area_1 *sis_area_2)
if((areaRatio >= DivSizeRatio_Min)&(areaRatio < DivSizeRatio_Max)&(diffArea<DivSizeDiffThreshold)):
return(0.0)
else:
return(DivSizeScoreReturn)
def DivIntScore(sis_int_1, sis_int_2):
global DivIntensityDiffThreshold,DivIntensityScoreReturn
diffInt = np.abs(sis_int_1 - sis_int_2)/np.sqrt(sis_int_1*sis_int_2)
if((diffInt<DivIntensityDiffThreshold)):
return(0.0)
else:
return(DivIntensityScoreReturn)
def DivScore(FeatureFrame_t0, FeatureFrame_t1, index_mom, index_sis_1, index_sis_2):
global DivMoveScoreReturn, mitosis_RangeMultiplier
momFF_select = FeatureFrame_t0.loc[index_mom]
sis1FF_select = FeatureFrame_t1.loc[index_sis_1]
sis2FF_select = FeatureFrame_t1.loc[index_sis_2]
mom_loc = [momFF_select['centroid-0'],momFF_select['centroid-1']]
mom_corner_1 = [momFF_select['bbox-0'],momFF_select['bbox-1']]
mom_corner_2 = [momFF_select['bbox-2'],momFF_select['bbox-3']]
mom_area = (momFF_select['area'])
mom_range = distance.euclidean(mom_corner_1,mom_corner_2)
sis1_loc = [sis1FF_select['centroid-0'],sis1FF_select['centroid-1']]
sis1_area = (sis1FF_select['area'])
sis1_int = (sis1FF_select['mean_intensity'])
sis2_loc = [sis2FF_select['centroid-0'],sis2FF_select['centroid-1']]
sis2_area = (sis2FF_select['area'])
sis2_int = (sis2FF_select['mean_intensity'])
mom_s1_dist = distance.euclidean(sis1_loc,mom_loc)
mom_s2_dist = distance.euclidean(sis2_loc,mom_loc)
sis_middle_loc = (np.array(sis1_loc)+np.array(sis2_loc))/2
cost1 = distance.euclidean(sis_middle_loc,mom_loc)
cost2 = np.abs(mom_s1_dist-mom_s2_dist)
cost3 = distance.euclidean(sis1_loc,sis2_loc)
if(cost3 < (mitosis_RangeMultiplier*mom_range)):
MoveCost = cost1 + cost2/2
else:
MoveCost = DivMoveScoreReturn
SizeCost = DivSizeScore(mom_area=mom_area, sis_area_1=sis1_area, sis_area_2=sis2_area)
IntCost = DivIntScore(sis_int_1=sis1_int, sis_int_2=sis2_int)
finalScore = np.round((MoveCost+SizeCost+IntCost),1)
return([index_mom,index_sis_1,index_sis_2,finalScore])
def GenMitosisPairs(CandidateFrame, motherIndex):
#returns array of daughter index-pairs in candidate frame
DaughtersPossible = np.where(CandidateFrame[:,motherIndex])[0]
if(len(DaughtersPossible)>1):
DaughtersPairs = np.array(np.meshgrid(DaughtersPossible, DaughtersPossible)).T.reshape(-1,2)
Sisters = np.unique(np.sort(DaughtersPairs),axis=0)
Sisters = Sisters[Sisters[:,0] != Sisters[:,1]]
includeMotherIndex = np.append((np.zeros((Sisters.shape[0],1))+motherIndex).astype(int),Sisters, 1)
return(includeMotherIndex)
else:
return(np.array([[0,0,0]]))
def getMitosisCandidates(CandidateFrame,FeatureFrame_t0, FeatureFrame_t1):
global mitosis_MaxScore
divCandidates = np.vstack(list(map(lambda x: GenMitosisPairs(CandidateFrame, x), range(CandidateFrame.shape[1]))))
divCandidates = divCandidates[(divCandidates[:,1]!=0)&(divCandidates[:,2]!=0)]
divScores = np.vstack(list(map(lambda x: DivScore(FeatureFrame_t0=FeatureFrame_t0, FeatureFrame_t1=FeatureFrame_t1,
index_mom=divCandidates[x,0], index_sis_1=divCandidates[x,1],
index_sis_2=divCandidates[x,2]),
range(divCandidates.shape[0]))))
divScores = divScores[divScores[:,3]<mitosis_MaxScore]
return(divScores[:,:3].astype(int))
def getCostMatrix(FeatureFrame_t0, FeatureFrame_t1, shiftVec):
global track_frameExpCoeff, costIntCoefficient, costSizeCoefficient, costPositionCoefficient
FeatureFrame_t0 = expandBoundBox(FeatureFrame_t0, expansion = track_frameExpCoeff)
FeatureFrame_t0 = futureBoundBox(FeatureFrame_t0, shiftVector=shiftVec)
CandidateMtx = genCandidateNodes(FeatureFrame_t0,FeatureFrame_t1)
FeatureFrame_t0 = expectedLocation(FeatureFrame_t0, shiftVector=shiftVec)
deltaPosition = getDifference(FeatureFrame_t0,FeatureFrame_t1,"position")
deltaArea = getDifference(FeatureFrame_t0,FeatureFrame_t1,"area")
deltaIntensity = getDifference(FeatureFrame_t0,FeatureFrame_t1,"mean_intensity")
costMatrix = ((costIntCoefficient*(deltaIntensity)+costSizeCoefficient*(deltaArea)+costPositionCoefficient*(deltaPosition))*CandidateMtx)
return((FeatureFrame_t0,CandidateMtx,costMatrix))
def solveMinCostFlow(CostMatrix,mitosisCands):
global openingCost, closingCost
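# network layout: node 0 is the source, nodes 1..n_t0 are objects at t0, nodes n_t0+1..n_t0+n_t1 are
# objects at t1, and the final node is the sink; source->t1 arcs model track openings (openingCost),
# t0->sink arcs model track closings (closingCost), and t0->t1 arcs carry the linking costs from CostMatrix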
t0Nodes = np.array(range(CostMatrix.shape[1]))+1
t1Nodes = np.array(range(CostMatrix.shape[1],np.sum(CostMatrix.shape)))+1
start_nodes = np.concatenate((np.repeat([0],np.sum(CostMatrix.shape)), # all connections to source node
(np.nonzero(CostMatrix))[1]+1, # all connections from t0 to t1
t0Nodes,t1Nodes # all connections to sink node
)).tolist()
end_nodes = np.concatenate((t0Nodes,t1Nodes, # all connections to source node
np.nonzero(CostMatrix)[0]+int(CostMatrix.shape[1])+1, # all connections from t0 to t1
np.repeat([np.sum(CostMatrix.shape)+1],np.sum(CostMatrix.shape)) # all connections to sink node
)).tolist()
costs = np.concatenate((np.repeat(1,CostMatrix.shape[1]), # all connections to source node
np.repeat(openingCost,CostMatrix.shape[0]),
CostMatrix[CostMatrix!=0], # all connections from t0 to t1
np.repeat(closingCost,CostMatrix.shape[1]), # all connections to sink node
np.repeat(1,CostMatrix.shape[0]) # all connections to sink node
)).tolist()
nodeCaps = np.concatenate((t0Nodes,t1Nodes),axis=0)
nodeCaps = np.vstack((nodeCaps, np.repeat(1,len(nodeCaps)))).T
if(len(mitosisCands)>0):
nodeCaps[np.searchsorted(nodeCaps[:,0],mitosisCands+1),1]=2
capacities = np.concatenate((nodeCaps[:,1], # all connections to source node
np.repeat(1,np.sum(CostMatrix[CostMatrix!=0].shape)),# all connections from t0 to t1
np.repeat(1,np.sum(CostMatrix.shape)) # all connections to sink node
)).tolist()
# supply_amount = np.min([CostMatrix.shape[1]+len(mitosisCands),CostMatrix.shape[0]])#np.max([CostMatrix.shape[0],CostMatrix.shape[1]])
supply_amount = np.max([CostMatrix.shape[1],CostMatrix.shape[0]])#np.max([CostMatrix.shape[0],CostMatrix.shape[1]])
supplies = np.concatenate(([supply_amount],np.repeat(0,np.sum(CostMatrix.shape)),[-1*supply_amount])).tolist()
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
# Add each arc.
for i in range(len(start_nodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],capacities[i], int(costs[i]))
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
ArcFrame = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
# exponential fn to fit to curves
def expfun(t, c1, a, T_inf):
return - c1 * np.exp(-a * t) + T_inf
rawdata = pd.read_csv('lab2_home_raw_data.csv')
fig1, ax1 = plt.subplots(figsize=(7,3), tight_layout=True)
fig2, ax2 = plt.subplots(figsize=(7,3), tight_layout=True)
# we iterate through each type, of which
# there are two: (uninsulated, insulated)
curve_parameters = dict()
for runtype, d1 in rawdata.groupby('type'):
xs = d1['time'][d1['time'] <= 50]
ys = d1['tempK'][d1['time'] <= 50] - 273.15
'''
fit to our model of the system. For a body for which LC assumption is true, the governing
equation is given by:
dTs/dt = - (h As) / (m Cv) * ( T_s(t) - T_inf )
We can group parameters:
dTs/dt = - a * (Ts(t) - Tinf)
This 1st order ODE has a solution of the form:
T_s(t) = - c1 e^( -a t ) + T_inf
where c1, a, and T_inf are free parameters.
So we fit the exp curve to those parameters
'''
params, _ = scipy.optimize.curve_fit(expfun, xs, ys, p0=(1, 1e-2, 1))
xxs = np.linspace(np.min(xs), np.max(xs), 900)
yys = expfun(xxs, *params)
curve_parameters[runtype] = {
'c1' : params[0],
'hAs/mCv' : params[1],
'T_inf' : params[2]
}
# we iterate through each run
if runtype == 'uninsulated':
for run, d2 in d1.groupby('video_fname'):
ax1.scatter(d2['time'][d2['time'] <= 50], d2['tempK'][d2['time'] <= 50]- 273.15, marker='+', label=runtype)
ax1.plot(xxs, yys, 'k', label='exp fitted - {}'.format(runtype))
ax1.set_ylabel('Temperature (C)')
ax1.set_xlabel('Time (s)')
ax1.legend()
elif runtype == 'insulated':
for run, d2 in d1.groupby('video_fname'):
ax2.scatter(d2['time'][d2['time'] <= 50], d2['tempK'][d2['time'] <= 50]- 273.15, marker='+', label=runtype)
ax2.plot(xxs, yys, 'k', label='exp fitted - {}'.format(runtype))
ax2.set_ylabel('Temperature (C)')
ax2.set_xlabel('Time (s)')
ax2.legend()
| pd.DataFrame(curve_parameters) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Code for finding the best pair of parameter (d_cn, k_var).
In this script, we denote d_cn as ct (abbr. CN threshold)
and k_var as st (abbr. SNPeff threshold).
@author: <NAME> (<EMAIL>)
"""
import numpy as np
import tensorflow as tf
import pandas as pd
import os
from IO import *
from method import *
from sklearn.metrics import roc_auc_score
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys, getopt
#############################################
## Params
#############################################
cn_start, cn_step, cn_stop = [0., 0., 0.]
snpeff_start, snpeff_step, snpeff_stop = [0, 0, 0]
path = 'Jai Hyun Park'
num_repeat = 10
argv = sys.argv
try:
opts, etc_args = getopt.getopt(argv[1:], 'h', ["repeat=", "path=", "cn-start=", "cn-step=", "cn-stop=", "snpeff-start=", "snpeff-step=", "snpeff-stop="])
except getopt.GetoptError:
print("invalid args")
sys.exit(2)
for opt, arg in opts:
if opt in ("--repeat"):
num_repeat = int(arg)
elif opt in ("--path"):
path = arg
elif opt in ("--cn-start"):
cn_start = float(arg)
elif opt in ("--cn-step"):
cn_step = float(arg)
elif opt in ("--cn-stop"):
cn_stop = float(arg)
elif opt in ("--snpeff-start"):
snpeff_start = int(arg)
elif opt in ("--snpeff-step"):
snpeff_step = int(arg)
elif opt in ("--snpeff-stop"):
snpeff_stop = int(arg)
cts = np.arange(cn_start, cn_stop, cn_step) #Cn Threshold
sts = np.arange(snpeff_start, snpeff_stop, snpeff_step) #Snpeff Threshold
print("* Experiment on ct = (", cn_start, ":", cn_step, ":", cn_stop, "), st = (", snpeff_start, ":", snpeff_step, ":", snpeff_stop, ")")
print("* repeat ", num_repeat, "times, and the results will be written in", path)
#############################################
## Load data
#############################################
# Train data
CN, SNPeff, true = read_all_data('data/Challenge/', '_challenge_CNs.txt', 'out/SNPeff/SNPeff_train.csv')
full_data = np.concatenate([CN, SNPeff.T])
num_CN_gene = CN.shape[0]
# gene list
gene_list = np.array(pd.read_csv('out/SNPeff/variant_gene_list.csv', sep = '\t', header = None, dtype='str'))
#############################################
## Our method
#############################################
approx_aucs = [[0. for st in sts] for ct in cts]
exact_aucs = [[0. for st in sts] for ct in cts]
approx_accs = [[0. for st in sts] for ct in cts]
exact_accs = [[0. for st in sts] for ct in cts]
full_num = full_data.shape[1]
val_size = (int) (full_num / num_repeat)
print(full_num, val_size)
for idx_ct in range(len(cts)):
for idx_st in range(len(sts)):
full_data, true = random_shuffle(full_data, true)
print("ct:", cts[idx_ct], ", st:", sts[idx_st])
ctr = 0
exact_auc, approx_auc = [0, 0]
exact_acc, approx_acc = [0, 0]
# Cross Validation
for i in range(num_repeat):
if (i > 0):
full_data = np.roll(full_data, shift=val_size, axis=1)
true = np.roll(true, shift=val_size, axis=0)
train_data, train_true = full_data[:, val_size:], true[val_size:]
val_data, val_true = full_data[:, :val_size], true[:val_size]
cand1 = candidate1_generation(cts[idx_ct], num_CN_gene, train_data)
cand2 = candidate2_generation(sts[idx_st], num_CN_gene, train_data, train_true)
candidate = np.concatenate((cand1, np.array(cand2) + num_CN_gene))
w = model_generation(train_data, train_true, val_data, val_true, candidate)
tmp_approx_acc, tmp_exact_acc, tmp_approx_auc, tmp_exact_auc = profile(val_data, val_true, candidate, w)
approx_auc += tmp_approx_auc
exact_auc += tmp_exact_auc
approx_acc += tmp_approx_acc
exact_acc += tmp_exact_acc
if tmp_approx_auc != 0:
ctr += 1
if ctr != 0:
approx_aucs[idx_ct][idx_st], exact_aucs[idx_ct][idx_st] = [approx_auc / ctr, exact_auc / num_repeat]
approx_accs[idx_ct][idx_st], exact_accs[idx_ct][idx_st] = [approx_acc / ctr, exact_acc / num_repeat]
else:
approx_aucs[idx_ct][idx_st], exact_aucs[idx_ct][idx_st] = [0, exact_auc / num_repeat]
approx_accs[idx_ct][idx_st], exact_accs[idx_ct][idx_st] = [0, exact_acc / num_repeat]
print("acc(approx/exact):", approx_accs[idx_ct][idx_st], exact_accs[idx_ct][idx_st])
print("auc(approx/exact):", approx_aucs[idx_ct][idx_st], exact_aucs[idx_ct][idx_st])
| pd.DataFrame(approx_aucs, index=cts) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
docstring, to write
"""
import os
from datetime import datetime
from typing import Union, Optional, Any, List, Dict, Iterable, Sequence, NoReturn
from numbers import Real
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import pandas as pd
import xmltodict as xtd
from pyedflib import EdfReader
from ...utils.misc import (
get_record_list_recursive,
)
from ...utils.utils_interval import intervals_union
from ..base import NSRRDataBase
__all__ = [
"SHHS",
]
class SHHS(NSRRDataBase):
"""
Sleep Heart Health Study
ABOUT shhs
----------
***ABOUT the dataset:
1. shhs1 (Visit 1):
1.1. the baseline clinic visit and polysomnogram performed between November 1, 1995 and January 31, 1998
1.2. in all, 6,441 men and women aged 40 years and older were enrolled
1.3. 5,804 rows, down from the original 6,441 due to data sharing rules on certain cohorts and subjects
2. shhs-interim-followup (Interim Follow-up):
2.1. an interim clinic visit or phone call 2-3 years after baseline (shhs1)
2.2. 5,804 rows, despite some subjects not having complete data, all original subjects are present in the dataset
3. shhs2 (Visit 2):
3.1. the follow-up clinic visit and polysomnogram performed between January 2001 and June 2003
3.2. during this exam cycle 3, a second polysomnogram was obtained in 3,295 of the participants
3.3. 4,080 rows, not all cohorts and subjects took part
4. shhs-cvd (CVD Outcomes):
4.1. the tracking of adjudicated heart health outcomes (e.g. stroke, heart attack) between baseline (shhs1) and 2008-2011 (varies by parent cohort)
4.2. 5,802 rows, outcomes data were not provided on all subjects
5. shhs-cvd-events (CVD Outcome Events):
5.1. event-level details for the tracking of heart health outcomes (shhs-cvd)
5.2. 4,839 rows, representing individual events
6. ECG was sampled at 125 Hz in shhs1 and 250/256 Hz in shhs2
7. `annotations-events-nsrr` and `annotations-events-profusion`: annotation files both contain xml files, the former processed in the EDF Editor and Translator tool, the latter exported from Compumedics Profusion
8. about 10% of the records have HRV (including sleep stages and sleep events) annotations
***DATA Analysis Tips:
1. Respiratory Disturbance Index (RDI):
1.1. A number of RDI variables exist in the data set. These variables are highly skewed.
1.2. log-transformation is recommended, among which the following transformation performed best, at least in some subsets:
NEWVA = log(OLDVAR + 0.1)
2. Obstructive Apnea Index (OAI):
2.1. There is one OAI index in the data set. It reflects obstructive events associated with a 4% desaturation or arousal. Nearly 30% of the cohort has a zero value for this variable
2.2. Dichotomization is suggested (e.g. >=3 or >=4 events per hour indicates positive)
3. Central Apnea Index (CAI):
3.1. Several variables describe central breathing events, with different thresholds for desaturation and requirement/non-requirement of arousals. ~58% of the cohort have zero values
3.2. Dichotomization is suggested (e.g. >=3 or >=4 events per hour indicates positive)
4. Sleep Stages:
4.1. Stage 1 and stage 3-4 are not normally distributed, but stage 2 and REM sleep are.
4.2. To use these data as continuous dependent variables, stages 1 and 3-4 must be transformed. The following formula is suggested:
-log(-log(val/100+0.001))
5. Sleep time below 90% O2:
5.1. Percent of total sleep time with oxygen levels below 75%, 80%, 85% and 90% were recorded
5.2. Dichotomization is suggested (e.g. >5% and >10% of sleep time with oxygen levels below a specific O2 level indicates positive)
More: [1]
***ABOUT signals: (ref. [10])
1. C3/A2 and C4/A1 EEGs, sampled at 125 Hz
2. right and left electrooculograms (EOGs), sampled at 50 Hz
3. a bipolar submental electromyogram (EMG), sampled at 125 Hz
4. thoracic and abdominal excursions (THOR and ABDO), recorded by inductive plethysmography bands and sampled at 10 Hz
5. "AIRFLOW" detected by a nasal-oral thermocouple, sampled at 10 Hz
6. finger-tip pulse oximetry sampled at 1 Hz
7. ECG from a bipolar lead, sampled at 125 Hz for most SHHS-1 studies and 250 (and 256?) Hz for SHHS-2 studies
8. Heart rate (PR) derived from the ECG and sampled at 1 Hz
9. body position (using a mercury gauge sensor)
10. ambient light (on/off, by a light sensor secured to the recording garment)
***ABOUT annotations (NOT including "nsrrid","visitnumber","pptid" etc.):
1. hrv annotations: (in csv files, ref. [2])
Start__sec_ --- 5 minute window start time
NN_RR --- Ratio of consecutive normal sinus beats (NN) over all cardiac inter-beat (RR) intervals
AVNN --- Mean of all normal sinus to normal sinus interbeat intervals (NN)
IHR --- Instantaneous heart rate
SDNN --- Standard deviation of all normal sinus to normal sinus interbeat (NN) intervals
SDANN --- Standard deviation of the averages of normal sinus to normal sinus interbeat (NN) intervals in all 5-minute segments
SDNNIDX --- Mean of the standard deviations of normal sinus to normal sinus interbeat (NN) intervals in all 5-minute segments
rMSSD --- Square root of the mean of the squares of difference between adjacent normal sinus to normal sinus interbeat (NN) intervals
pNN10 --- Percentage of differences between adjacent normal sinus to normal sinus interbeat (NN) intervals that are >10 ms
pNN20 --- Percentage of differences between adjacent normal sinus to normal sinus interbeat (NN) intervals that are >20 ms
pNN30 --- Percentage of differences between adjacent normal sinus to normal sinus interbeat (NN) intervals that are >30 ms
pNN40 --- Percentage of differences between adjacent normal sinus to normal sinus interbeat (NN) intervals that are >40 ms
pNN50 --- Percentage of differences between adjacent normal sinus to normal sinus interbeat (NN) intervals that are >50 ms
tot_pwr --- Total normal sinus to normal sinus interbeat (NN) interval spectral power up to 0.4 Hz
ULF --- Ultra-low frequency power, the normal sinus to normal sinus interbeat (NN) interval spectral power between 0 and 0.003 Hz
VLF --- Very low frequency power, the normal sinus to normal sinus interbeat (NN) interval spectral power between 0.003 and 0.04 Hz
LF --- Low frequency power, the normal sinus to normal sinus interbeat (NN) interval spectral power between 0.04 and 0.15 Hz
HF --- High frequency power, the normal sinus to normal sinus interbeat (NN) interval spectral power between 0.15 and 0.4 Hz
LF_HF --- The ratio of low to high frequency power
LF_n --- Low frequency power (normalized)
HF_n --- High frequency power (normalized)
2. wave delineation annotations: (in csv files, NOTE: see "CAUTION" by the end of this part, ref. [2])
RPoint --- Sample Number indicating R Point (peak of QRS)
Start --- Sample Number indicating start of beat
End --- Sample Number indicating end of beat
STLevel1 --- Level of ECG 1 in Raw data ( 65536 peak to peak rawdata = 10mV peak to peak)
STSlope1 --- Slope of ECG 1 stored as int and to convert to a double divide raw value by 1000.0
STLevel2 --- Level of ECG 2 in Raw data ( 65536 peak to peak rawdata = 10mV peak to peak)
STSlope2 --- Slope of ECG 2 stored as int and to convert to a double divide raw value by 1000.0
Manual --- (True / False) True if record was manually inserted
Type --- Type of beat (0 = Artifact / 1 = Normal Sinus Beat / 2 = VE / 3 = SVE)
Class --- no longer used
PPoint --- Sample Number indicating peak of the P wave (-1 if no P wave detected)
PStart --- Sample Number indicating start of the P wave
PEnd --- Sample Number indicating end of the P wave
TPoint --- Sample Number indicating peak of the T wave (-1 if no T wave detected)
TStart --- Sample Number indicating start of the T wave
TEnd --- Sample Number indicating end of the T wave
TemplateID --- The ID of the template to which this beat has been assigned (-1 if not assigned to a template)
nsrrid --- nsrrid of this record
samplingrate--- frequency of the ECG signal of this record
seconds --- Number of seconds from beginning of recording to R-point (Rpoint / sampling rate)
epoch --- Epoch (30 second) number
rpointadj --- R Point adjusted sample number (RPoint * (samplingrate/256))
CAUTION: all the above sampling numbers except for rpointadj assume 256 Hz, while the rpointadj column has been added to provide an adjusted sample number based on the actual sampling rate.
3. event annotations: (in xml files)
TODO
4. event_profusion annotations: (in xml files)
TODO
***DEFINITION of concepts in sleep study:
1. Arousal: (ref. [3],[4])
1.1. interruptions of sleep lasting 3 to 15 seconds
1.2. can occur spontaneously or as a result of sleep-disordered breathing or other sleep disorders
1.3. sends you back to a lighter stage of sleep
1.4. if the arousal last more than 15 seconds, it becomes an awakening
1.5. the higher the arousal index (occurrences per hour), the more tired you are likely to feel, though people vary in their tolerance of sleep disruptions
2. Central Sleep Apnea (CSA): (ref. [3],[5],[6])
2.1. breathing repeatedly stops and starts during sleep
2.2. occurs because your brain (central nervous system) doesn't send proper signals to the muscles that control your breathing, which is the point that distinguishes it from obstructive sleep apnea
2.3. may occur as a result of other conditions, such as heart failure, stroke, high altitude, etc.
3. Obstructive Sleep Apnea (OSA): (ref. [3],[7])
3.1. occurs when throat muscles intermittently relax and block upper airway during sleep
3.2. a noticeable sign of obstructive sleep apnea is snoring
4. Complex (Mixed) Sleep Apnea: (ref. [3])
4.1. combination of both CSA and OSA
4.2. exact mechanism of the loss of central respiratory drive during sleep in OSA is unknown but is most likely related to incorrect settings of the CPAP (Continuous Positive Airway Pressure) treatment and other medical conditions the person has
5. Hypopnea:
overly shallow breathing or an abnormally low respiratory rate. Hypopnea is defined by some to be less severe than apnea (the complete cessation of breathing)
6. Apnea Hypopnea Index (AHI): to write
6.1. used to indicate the severity of OSA
6.2. number of apneas or hypopneas recorded during the study per hour of sleep
6.3. based on the AHI, the severity of OSA is classified as follows
- none/minimal: AHI < 5 per hour
- mild: AHI ≥ 5, but < 15 per hour
- moderate: AHI ≥ 15, but < 30 per hour
- severe: AHI ≥ 30 per hour
7. Oxygen Desaturation:
7.1. used to indicate the severity of OSA
7.2. reductions in blood oxygen levels (desaturation)
7.3. at sea level, a normal blood oxygen level (saturation) is usually 96 - 97%
7.4. (no generally accepted classifications for severity of oxygen desaturation)
- mild: >= 90%
- moderate: 80% - 89%
- severe: < 80%
NOTE
----
ISSUES
------
1. `Start__sec_` might not be the start time, but rather the end time, of the 5 minute windows in some records
2. the current version "0.15.0" removed EEG spectral summary variables
Usage
-----
1. sleep stage
2. sleep apnea
References
----------
[1] https://sleepdata.org/datasets/shhs/pages/
[2] https://sleepdata.org/datasets/shhs/pages/13-hrv-analysis.md
[3] https://en.wikipedia.org/wiki/Sleep_apnea
[4] https://www.sleepapnea.org/treat/getting-sleep-apnea-diagnosis/sleep-study-details/
[5] https://www.mayoclinic.org/diseases-conditions/central-sleep-apnea/symptoms-causes/syc-20352109
[6] https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2287191/
[7] https://www.mayoclinic.org/diseases-conditions/obstructive-sleep-apnea/symptoms-causes/syc-20352090
[8] https://en.wikipedia.org/wiki/Hypopnea
[9] http://healthysleep.med.harvard.edu/sleep-apnea/diagnosing-osa/understanding-results
[10] https://sleepdata.org/datasets/shhs/pages/full-description.md
"""
def __init__(self,
db_dir:str,
working_dir:Optional[str]=None,
verbose:int=2,
**kwargs:Any,) -> NoReturn:
"""
Parameters
----------
db_dir: str,
storage path of the database
working_dir: str, optional,
working directory, to store intermediate files and log file
verbose: int, default 2,
log verbosity
kwargs: auxiliary keyword arguments
default db_dir:
"/export/algo/wenh06/ecg_data/shhs/"
"""
super().__init__(db_name="SHHS", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
self.current_version = kwargs.get("current_version" , "0.15.0")
self.psg_data_path = None
self.ann_path = None
self.hrv_ann_path = None
self.eeg_ann_path = None
self.wave_deli_path = None
self.event_ann_path = None
self.event_profusion_ann_path = None
self.form_paths()
self.fs = None
self.file_opened = None
# stats
try:
self.rec_with_hrv_ann = [f"shhs{int(row['visitnumber'])}-{int(row['nsrrid'])}" for _,row in self.load_hrv_summary_ann().iterrows()]
except:
self.rec_with_hrv_ann = []
self.all_signals = [
"SaO2", "H.R.", "EEG(sec)", "ECG", "EMG", "EOG(L)", "EOG(R)", "EEG", "SOUND", "AIRFLOW", "THOR RES", "ABDO RES", "POSITION", "LIGHT", "NEW AIR", "OX stat",
]
# annotations regarding sleep analysis
self.hrv_ann_summary_keys = [
"nsrrid", "visitnumber",
"NN_RR", "AVNN", "IHR", "SDNN", "SDANN", "SDNNIDX", "rMSSD",
"pNN10", "pNN20", "pNN30", "pNN40", "pNN50",
"tot_pwr", "ULF", "VLF", "LF", "HF", "LF_HF", "LF_n", "HF_n"
]
self.hrv_ann_detailed_keys = [
"nsrrid", "visitnumber", "Start__sec_",
"ihr", "NN_RR", "AVNN", "SDNN", "rMSSD",
"PNN10", "PNN20", "PNN30", "PNN40", "PNN50",
"TOT_PWR", "VLF", "LF", "LF_n", "HF", "HF_n", "LF_HF",
"sleepstage01", "sleepstage02", "sleepstage03", "sleepstage04", "sleepstage05",
"sleepstage06", "sleepstage07", "sleepstage08", "sleepstage09", "sleepstage10",
"event01start", "event01end", "event02start", "event02end",
"event03start", "event03end", "event04start", "event04end",
"event05start", "event05end", "event06start", "event06end",
"event07start", "event07end", "event08start", "event08end",
"event09start", "event09end", "event10start", "event10end",
"event11start", "event11end", "event12start", "event12end",
"event13start", "event13end", "event14start", "event14end",
"event15start", "event15end", "event16start", "event16end",
"event17start", "event17end", "event18start", "event18end",
"hasrespevent"
]
self.hrv_ann_epoch_len_sec = 300 # 5min
self.sleep_ann_keys_from_hrv = [
"Start__sec_",
"sleepstage01", "sleepstage02", "sleepstage03", "sleepstage04", "sleepstage05",
"sleepstage06", "sleepstage07", "sleepstage08", "sleepstage09", "sleepstage10",
"event01start", "event01end", "event02start", "event02end",
"event03start", "event03end", "event04start", "event04end",
"event05start", "event05end", "event06start", "event06end",
"event07start", "event07end", "event08start", "event08end",
"event09start", "event09end", "event10start", "event10end",
"event11start", "event11end", "event12start", "event12end",
"event13start", "event13end", "event14start", "event14end",
"event15start", "event15end", "event16start", "event16end",
"event17start", "event17end", "event18start", "event18end",
"hasrespevent"
]
self.sleep_stage_ann_keys_from_hrv = [
"Start__sec_",
"sleepstage01", "sleepstage02", "sleepstage03", "sleepstage04", "sleepstage05",
"sleepstage06", "sleepstage07", "sleepstage08", "sleepstage09", "sleepstage10",
]
self.sleep_event_ann_keys_from_hrv = [
"Start__sec_",
"event01start", "event01end", "event02start", "event02end",
"event03start", "event03end", "event04start", "event04end",
"event05start", "event05end", "event06start", "event06end",
"event07start", "event07end", "event08start", "event08end",
"event09start", "event09end", "event10start", "event10end",
"event11start", "event11end", "event12start", "event12end",
"event13start", "event13end", "event14start", "event14end",
"event15start", "event15end", "event16start", "event16end",
"event17start", "event17end", "event18start", "event18end",
"hasrespevent"
]
# annotations from events-nsrr and events-profusion folders
self.event_keys = [
"EventType", "EventConcept", "Start", "Duration", "SignalLocation", "SpO2Nadir", "SpO2Baseline"
]
# NOTE: the union of names from shhs1-200001 to shhs1-200399
# NOT a full search
self.short_event_types_from_event = [
"Respiratory", "Stages", "Arousals",
]
self.long_event_types_from_event = [
"Respiratory|Respiratory", "Stages|Stages", "Arousals|Arousals",
]
# NOTE: the union of names from shhs1-200001 to shhs1-200399
# NOT a full search
# NOT including sleep stages
self.short_event_names_from_event = [
"Central Apnea", "Obstructive Apnea", "Mixed Apnea", "Hypopnea",
"SpO2 artifact", "SpO2 desaturation",
"Arousal ()", "Arousal (Standard)", "Arousal (STANDARD)",
"Arousal (CHESHIRE)", "Arousal (ASDA)",
"Unsure",
]
self.long_event_names_from_event = [
"Central apnea|Central Apnea",
"Obstructive apnea|Obstructive Apnea",
"Mixed apnea|Mixed Apnea",
"Hypopnea|Hypopnea",
"SpO2 artifact|SpO2 artifact",
"SpO2 desaturation|SpO2 desaturation",
"Arousal|Arousal ()",
"Arousal|Arousal (Standard)",
"Arousal|Arousal (STANDARD)",
"Arousal resulting from Chin EMG|Arousal (CHESHIRE)",
"ASDA arousal|Arousal (ASDA)",
"Unsure|Unsure",
]
self.event_profusion_keys = [
"Name", "Start", "Duration", "Input", "LowestSpO2", "Desaturation"
]
# NOTE: currently the union of names from shhs1-200001 to shhs1-200099,
# NOT a full search
self.event_names_from_event_profusion = [
"Central Apnea", "Obstructive Apnea", "Mixed Apnea", "Hypopnea",
"SpO2 artifact", "SpO2 desaturation",
"Arousal ()", "Arousal (ASDA)",
"Unsure",
]
self.apnea_types = [
"Central Apnea", "Obstructive Apnea", "Mixed Apnea", "Hypopnea",
]
# annotations regarding wave delineation
self.wave_deli_keys = [
"RPoint",
"Start", "End",
"STLevel1", "STSlope1", "STLevel2", "STSlope2",
"Manual",
"Type",
"PPoint", "PStart", "PEnd",
"TPoint", "TStart", "TEnd",
"TemplateID",
"nsrrid", "samplingrate", "seconds", "epoch",
"rpointadj",
]
self.wave_deli_samp_num_keys = [
"RPoint",
"Start", "End",
"PPoint", "PStart", "PEnd",
"TPoint", "TStart", "TEnd",
]
# TODO: other annotation files: EEG
# self-defined items
self.sleep_stage_keys = ["start_sec", "sleep_stage"]
self.sleep_event_keys = ["event_name", "event_start", "event_end", "event_duration"]
self.sleep_epoch_len_sec = 30
self.ann_sleep_stages = [0,1,2,3,4,5,9]
"""
0 --- Wake
1 --- sleep stage 1
2 --- sleep stage 2
3 --- sleep stage 3/4
4 --- sleep stage 3/4
5 --- REM stage
9 --- Movement/Wake or Unscored?
"""
self.sleep_stage_protocol = kwargs.get("sleep_stage_protocol", "aasm")
self.all_sleep_stage_names = ["W", "R", "N1", "N2", "N3", "N4"]
self.sleep_stage_name_value_mapping = {
"W":0, "R":1, "N1":2, "N2":3, "N3":4, "N4":5
}
self.sleep_stage_names = []
self.update_sleep_stage_names()
self._to_simplified_states = {9:0, 0:0, 5:1, 1:2, 2:2, 3:3, 4:3}
""" 9 to nan?
0 --- awake
1 --- REM
2 --- shallow sleep (NREM stages 1/2)
3 --- deep sleep (NREM stages 3/4)
"""
self._to_aasm_states = {9:0, 0:0, 5:1, 1:2, 2:3, 3:4, 4:4}
""" 9 to nan?
0 --- awake
1 --- REM
2 --- N1 (NREM1)
3 --- N2 (NREM2)
4 --- N3 (NREM3/4)
"""
self._to_shhs_states = {9:0, 0:0, 5:1, 1:2, 2:3, 3:4, 4:5}
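# Illustrative mapping check (not part of the original code): a raw SHHS hypnogram
# [0, 1, 2, 3, 4, 5, 9] becomes
#   shhs       -> [0, 2, 3, 4, 5, 1, 0]
#   aasm       -> [0, 2, 3, 4, 4, 1, 0]   (stages 3 and 4 merge into N3)
#   simplified -> [0, 2, 2, 3, 3, 1, 0]   (light vs. deep NREM only)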
# for plotting
self.palette = {
"W": "orange",
"R": "yellow",
"N1": "green",
"N2": "cyan",
"N3": "blue",
"N4": "purple",
"Central Apnea": "red",
"Obstructive Apnea": "yellow",
"Mixed Apnea": "cyan",
"Hypopnea": "purple",
} # TODO: add more
def form_paths(self) -> NoReturn:
""" finished,
"""
self.psg_data_path = os.path.join(self.db_dir, "polysomnography", "edfs")
self.ann_path = os.path.join(self.db_dir, "datasets")
self.hrv_ann_path = os.path.join(self.ann_path, "hrv-analysis")
self.eeg_ann_path = os.path.join(self.ann_path, "eeg-spectral-analysis")
self.wave_deli_path = os.path.join(self.db_dir, "polysomnography", "annotations-rpoints")
self.event_ann_path = os.path.join(self.db_dir, "polysomnography", "annotations-events-nsrr")
self.event_profusion_ann_path = os.path.join(self.db_dir, "polysomnography", "annotations-events-profusion")
def update_sleep_stage_names(self) -> NoReturn:
""" finished,
"""
if self.sleep_stage_protocol == "aasm":
nb_stages = 5
elif self.sleep_stage_protocol == "simplified":
nb_stages = 4
elif self.sleep_stage_protocol == "shhs":
nb_stages = 6
else:
raise ValueError(f"No stage protocol named {self.sleep_stage_protocol}")
self.sleep_stage_names = self.all_sleep_stage_names[:nb_stages]
def get_subject_id(self, rec:str) -> int:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
Returns
-------
pid, int, `subject_id` derived from `rec`
"""
head_shhs1,head_shhs2v3,head_shhs2v4 = "30000", "30001", "30002"
dataset_no, no = rec.split("-")
dataset_no = dataset_no[-1]
if dataset_no == "2":
raise ValueError("SHHS2 has two different sampling frequencies, currently could not be distinguished using only `rec`")
pid = int(head_shhs1+dataset_no+no)
return pid
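# Worked example: get_subject_id("shhs1-200001")
#   -> int("30000" + "1" + "200001") = 300001200001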
def get_visit_number(self, rec:str) -> int:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
Returns
-------
int, visit number extracted from `rec`
"""
return int(rec.split("-")[0][-1])
def get_nsrrid(self, rec:str) -> int:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
Returns
-------
int, nsrrid extracted from `rec`
"""
return int(rec.split("-")[1])
def get_fs(self, rec:str, sig:str="ECG", rec_path:Optional[str]=None) -> int:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
sig: str, default "ECG",
signal name
rec_path: str, optional,
path of the file which contains the psg data,
if not given, default path will be used
Returns
-------
fs, int,
the sampling frequency of the signal `sig` of the record `rec`
"""
frp = self.match_full_rec_path(rec, rec_path)
self.safe_edf_file_operation("open", frp)
chn_num = self.file_opened.getSignalLabels().index(self.match_channel(sig))
fs = self.file_opened.getSampleFrequency(chn_num)
self.safe_edf_file_operation("close")
return fs
def get_chn_num(self, rec:str, sig:str="ECG", rec_path:Optional[str]=None) -> int:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
sig: str, default "ECG",
signal name
rec_path: str, optional,
path of the file which contains the psg data,
if not given, default path will be used
Returns
-------
chn_num, int,
the number of channel of the signal `sig` of the record `rec`
"""
frp = self.match_full_rec_path(rec, rec_path)
self.safe_edf_file_operation("open", frp)
chn_num = self.file_opened.getSignalLabels().index(self.match_channel(sig))
self.safe_edf_file_operation("close")
return chn_num
def match_channel(self, channel:str) -> str:
""" finished,
Parameters
----------
channel: str,
channel name
Returns
-------
str, the standard channel name in SHHS
"""
for sig in self.all_signals:
if sig.lower() == channel.lower():
return sig
raise ValueError(f"No channel named {channel}")
def match_full_rec_path(self, rec:str, rec_path:Optional[str]=None, rec_type:str="psg") -> str:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
rec_path: str, optional,
path of the file which contains the desired data,
if not given, default path will be used
rec_type: str, default "psg",
record type, data or annotations
Returns
-------
"""
extension = {
"psg": ".edf",
"wave_delineation": "-rpoint.csv",
"event": "-nsrr.xml",
"event_profusion": "-profusion.xml"
}
folder_or_file = {
"psg": self.psg_data_path,
"hrv_summary": os.path.join(self.hrv_ann_path, f"shhs{self.get_visit_number(rec)}-hrv-summary-{self.current_version}.csv"),
"hrv_5min": os.path.join(self.hrv_ann_path, f"shhs{self.get_visit_number(rec)}-hrv-5min-{self.current_version}.csv"),
"eeg_band_summary": os.path.join(self.eeg_ann_path, f"shhs{self.get_visit_number(rec)}-eeg-band-summary-dataset-{self.current_version}.csv"),
"eeg_spectral_summary": os.path.join(self.eeg_ann_path, f"shhs{self.get_visit_number(rec)}-eeg-spectral-summary-dataset-{self.current_version}.csv"),
"wave_delineation": self.wave_deli_path,
"event": self.event_ann_path,
"event_profusion": self.event_profusion_ann_path
}
if rec_path is not None:
rp = rec_path
elif rec_type.split("_")[0] in ["hrv", "eeg"]:
rp = folder_or_file[rec_type]
else:
rp = os.path.join(folder_or_file[rec_type], rec.split("-")[0], rec+extension[rec_type])
return rp
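# Illustrative sketch (assuming self.db_dir = "/data/shhs", which is not defined in this file):
#   match_full_rec_path("shhs1-200001")
#   -> "/data/shhs/polysomnography/edfs/shhs1/shhs1-200001.edf"
#   match_full_rec_path("shhs1-200001", rec_type="event")
#   -> "/data/shhs/polysomnography/annotations-events-nsrr/shhs1/shhs1-200001-nsrr.xml"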
def database_stats(self) -> NoReturn:
"""
"""
raise NotImplementedError
def database_info(self, detailed:bool=False) -> NoReturn:
""" finished,
print information about the database
Parameters
----------
detailed: bool, default False,
if False, "What","Who","When","Funding" will be printed,
if True, then docstring of the class will be printed additionally
"""
raw_info = {
"What": "Multi-cohort study focused on sleep-disordered breathing and cardiovascular outcomes",
"Who": "5804 adults aged 40 and older",
"When": "Two exam cycles, 1995-1998 and 2001-2003. Cardiovascular disease outcomes were tracked until 2010",
"Funding": "National Heart, Lung, and Blood Institute"
}
print(raw_info)
if detailed:
print(self.__doc__)
def show_rec_stats(self, rec:str, rec_path:Optional[str]=None) -> NoReturn:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
rec_path: str, optional,
path of the file which contains the psg data,
if not given, default path will be used
"""
frp = self.match_full_rec_path(rec, rec_path, rec_type="psg")
self.safe_edf_file_operation("open", frp)
for chn,lb in enumerate(self.file_opened.getSignalLabels()):
print("SignalLabel:",lb)
print("Prefilter:",self.file_opened.getPrefilter(chn))
print("Transducer:",self.file_opened.getTransducer(chn))
print("PhysicalDimension:",self.file_opened.getPhysicalDimension(chn))
print("SampleFrequency:",self.file_opened.getSampleFrequency(chn))
print("*"*40)
self.safe_edf_file_operation("close")
def load_data(self, rec:str) -> NoReturn:
"""
"""
raise ValueError("Please load specific data, for example, psg, ecg, eeg, etc.")
def load_ann(self, rec:str) -> NoReturn:
"""
"""
raise ValueError("Please load specific annotations, for example, event annotations, etc.")
def load_psg_data(self, rec:str, channel:str="all", rec_path:Optional[str]=None) -> Dict[str, np.ndarray]:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
channel: str, default "all",
name of the channel of PSG,
if is "all", then all channels will be returned
rec_path: str, optional,
path of the file which contains the psg data,
if not given, default path will be used
Returns
-------
dict, psg data
"""
chn = self.match_channel(channel) if channel.lower() != "all" else "all"
frp = self.match_full_rec_path(rec, rec_path, rec_type="psg")
self.safe_edf_file_operation("open", frp)
data_dict = {k: self.file_opened.readSignal(idx) for idx,k in enumerate(self.file_opened.getSignalLabels())}
self.safe_edf_file_operation("close")
if chn == "all":
return data_dict
else:
return {chn: data_dict[chn]}
def load_ecg_data(self, rec:str, rec_path:Optional[str]=None) -> np.ndarray:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
rec_path: str, optional,
path of the file which contains the ecg data,
if not given, default path will be used
Returns
-------
"""
return self.load_psg_data(rec=rec, channel="ecg", rec_path=rec_path)[self.match_channel("ecg")]
def load_event_ann(self, rec:str, event_ann_path:Optional[str]=None, simplify:bool=False) -> pd.DataFrame:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
event_ann_path: str, optional,
path of the file which contains the events-nsrr annotations,
if not given, default path will be used
Returns
-------
df_events: DataFrame,
"""
file_path = self.match_full_rec_path(rec, event_ann_path, rec_type="event")
with open(file_path) as fd:
doc = xtd.parse(fd.read())
df_events = pd.DataFrame(doc["PSGAnnotation"]["ScoredEvents"]["ScoredEvent"][1:])
if simplify:
df_events["EventType"] = df_events["EventType"].apply(lambda s: s.split("|")[1])
df_events["EventConcept"] = df_events["EventConcept"].apply(lambda s: s.split("|")[1])
for c in ["Start", "Duration", "SpO2Nadir", "SpO2Baseline"]:
df_events[c] = df_events[c].apply(self.str_to_real_number)
return df_events
def load_event_profusion_ann(self, rec:str, event_profusion_ann_path:Optional[str]=None) -> dict:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
event_profusion_ann_path: str, optional,
path of the file which contains the events-profusion annotations,
if not given, default path will be used
Returns
-------
dict, with items "sleep_stage_list", "df_events"
TODO:
merge "sleep_stage_list" and "df_events" into one DataFrame
"""
file_path = self.match_full_rec_path(rec, event_profusion_ann_path, rec_type="event_profusion")
with open(file_path) as fd:
doc = xtd.parse(fd.read())
sleep_stage_list = [int(ss) for ss in doc["CMPStudyConfig"]["SleepStages"]["SleepStage"]]
df_events = pd.DataFrame(doc["CMPStudyConfig"]["ScoredEvents"]["ScoredEvent"])
for c in ["Start", "Duration", "LowestSpO2", "Desaturation"]:
df_events[c] = df_events[c].apply(self.str_to_real_number)
ret = {
"sleep_stage_list": sleep_stage_list,
"df_events": df_events
}
return ret
def load_hrv_summary_ann(self, rec:Optional[str]=None, hrv_ann_path:Optional[str]=None) -> pd.DataFrame:
""" finished,
Parameters
----------
rec: str, optional,
record name, typically in the form "shhs1-200001"
hrv_ann_path: str, optional,
path of the summary HRV annotation file,
if not given, default path will be used
Returns
-------
df_hrv_ann, DataFrame,
if `rec` is not None, df_hrv_ann is the summary HRV annotations of `rec`;
if `rec` is None, df_hrv_ann is the summary HRV annotations of all records
that had HRV annotations (about 10% of all the records in SHHS)
"""
if rec is None:
file_path = self.match_full_rec_path("shhs1-200001", hrv_ann_path, rec_type="hrv_summary")
df_hrv_ann = pd.read_csv(file_path, engine="python")
file_path = self.match_full_rec_path("shhs2-200001", hrv_ann_path, rec_type="hrv_summary")
df_hrv_ann = pd.concat([df_hrv_ann, pd.read_csv(file_path, engine="python")])
return df_hrv_ann
file_path = self.match_full_rec_path(rec, hrv_ann_path, rec_type="hrv_summary")
df_hrv_ann = pd.read_csv(file_path, engine="python")
df_hrv_ann = df_hrv_ann[df_hrv_ann["nsrrid"]==self.get_nsrrid(rec)].reset_index(drop=True)
return df_hrv_ann
def load_hrv_detailed_ann(self, rec:str, hrv_ann_path:Optional[str]=None) -> pd.DataFrame:
""" finished,
Parameters
----------
rec: str,
record name, typically in the form "shhs1-200001"
hrv_ann_path: str, optional,
path of the detailed HRV annotation file,
if not given, default path will be used
Returns
-------
df_hrv_ann, DataFrame,
detailed HRV annotations of `rec`
"""
file_path = self.match_full_rec_path(rec, hrv_ann_path, rec_type="hrv_5min")
if not os.path.isfile(file_path):
raise FileNotFoundError(f"Record {rec} has no HRV annotation (including sleep annotaions). Or the annotation file has not been downloaded yet. Or the path {file_path} is not correct. Please check!")
self.logger.info(f"HRV annotations of record {rec} will be loaded from the file\n{file_path}")
df_hrv_ann = pd.read_csv(file_path, engine="python")
# -*- coding:utf-8 _*-
"""
@author:<NAME>
@time: 2019/12/02
"""
from urllib.parse import unquote
import pandas as pd
from redis import ConnectionPool, Redis
from scrapy.utils.project import get_project_settings
from dingxiangyuan import settings
from sqlalchemy import create_engine
from DBUtils.PooledDB import PooledDB
class DBPoolHelper(object):
def __init__(self, dbname, user=None, password=None, db_type='postgresql', host='localhost', port=5432):
"""
# sqlite3
# connect using the database file path; sqlite does not support encryption and takes no username/password
import sqlite3
config = {"datanase": "path/to/your/dbname.db"}
pool = PooledDB(sqlite3, maxcached=50, maxconnections=1000, maxusage=1000, **config)
# mysql
import pymysql
pool = PooledDB(pymysql,5,host='localhost', user='root',passwd='<PASSWORD>',db='myDB',port=3306)  # 5 is the minimum number of connections kept in the pool
# postgresql
import psycopg2
POOL = PooledDB(creator=psycopg2, host="127.0.0.1", port="5342", user, password, database)
# sqlserver
import pymssql
pool = PooledDB(creator=pymssql, host=host, port=port, user=user, password=password, database=database, charset="utf8")
:param type:
"""
if db_type == 'postgresql':
import psycopg2
pool = PooledDB(creator=psycopg2, host=host, port=port, user=user, password=password, database=dbname)
elif db_type == 'mysql':
import pymysql
pool = PooledDB(pymysql, 5, host=host, port=port, user=user, passwd=password, db=dbname)  # 5 is the minimum number of connections kept in the pool
elif db_type == 'sqlite':
import sqlite3
config = {"datanase": dbname}
pool = PooledDB(sqlite3, maxcached=50, maxconnections=1000, maxusage=1000, **config)
else:
raise Exception('Please enter a correct database type: db_type="postgresql", db_type="mysql" or db_type="sqlite"')
self.conn = pool.connection()
self.cursor = self.conn.cursor()
def connect_close(self):
"""ๅ
ณ้ญ่ฟๆฅ"""
self.cursor.close()
self.conn.close()
def execute(self, sql, params=tuple()):
self.cursor.execute(sql, params)  # execute the statement
self.conn.commit()
def fetchone(self, sql, params=tuple()):
self.cursor.execute(sql, params)
data = self.cursor.fetchone()
return data
def fetchall(self, sql, params=tuple()):
self.cursor.execute(sql, params)
data = self.cursor.fetchall()
return data
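# Usage sketch (illustrative only; the credentials and SQL below are placeholders, not project values):
#     db = DBPoolHelper("dingxiangyuan", user="postgres", password="secret",
#                       db_type="postgresql", host="127.0.0.1", port=5432)
#     rows = db.fetchall("SELECT topic_url, reply_num FROM topics")
#     db.connect_close()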
def pandas_db_helper():
"""
'postgresql://postgres:[email protected]:5432/xiaomuchong'
"mysql+pymysql://root:[email protected]:3306/srld?charset=utf8mb4"
"sqlite: ///sqlite3.db"
"""
engine = create_engine(settings.DATABASE_ENGINE)
conn = engine.connect()
return conn
def redis_init():
settings = get_project_settings()
if settings["REDIS_PARAMS"]:
pool = ConnectionPool(host=settings["REDIS_HOST"], port=settings["REDIS_PORT"],
password=settings["REDIS_PARAMS"]['password'])
else:
pool = ConnectionPool(host=settings["REDIS_HOST"], port=settings["REDIS_PORT"])
conn = Redis(connection_pool=pool)
return conn
redis_conn = redis_init()
db_conn = pandas_db_helper()
def cal_page_url(row):
topic_url, reply_num = row[0], row[1]
page_num = reply_num // 35 + 1
redis_conn.sadd('topic_page_urls', topic_url)
for page in range(2, page_num + 1):
redis_conn.sadd('topic_page_urls', f'{topic_url}?ppg={page}')
print(topic_url)
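# Example: a topic with reply_num = 80 spans 80 // 35 + 1 = 3 pages, so the bare
# topic_url plus topic_url + "?ppg=2" and "?ppg=3" end up in the 'topic_page_urls' set.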
def insert_redis_topic_page_urls():
data = pd.read_sql(sql="topics", con=db_conn, columns=["topic_url", "reply_num"])
data.apply(cal_page_url, axis=1)
def get_topic_left_start_urls():
topic_urls = pd.read_sql(sql="select distinct topic_url from posts_replies", con=db_conn)
topic_urls_floor_one = pd.read_sql(sql="select topic_url from posts_replies where floor=1", con=db_conn)
has_topic_urls = set(topic_urls['topic_url']) - set(topic_urls_floor_one['topic_url'])
topic_page_urls = redis_conn.smembers('topic_page_urls')
start_urls = {url.decode() for url in topic_page_urls if url.decode().split('?')[0] in has_topic_urls}
print(len(has_topic_urls), len(start_urls))
def get_user_start_urls():
""" ่ทๅ็จๆท่กจ่ตทๅงurl """
user_urls = pd.read_sql(sql="select distinct author_url from posts_replies", con=db_conn)
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 11:29:34 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
# from functools import partial
from american_option_pricing import american_option
import density_utilities as du
import prediction_ensemble_py as pe
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = pd.read_excel('spy.xlsx', index_col=None)
current_date = date(2020,9,29)
expiry_date = date(2020,10,2)
days_to_expiry = np.busday_count( current_date, expiry_date)-1
# min_p_profit = 35
# hor_leg_factor = 0.05
forecast_dens = False
save_results = True
save_plots = True
Strategies = []
Strategies = ["Butterfly","Double Broken Wing Butterfly","Iron Condor"]
# Strategies = ["Iron Condor"]
"""
#######################################################################################
Get Risk Free Rate
#######################################################################################
"""
print("\n Gathering Risk Free Rate")
rf_eod_data = yf.download("^IRX", start="2020-07-01", end= current_date.strftime("%Y-%m-%d"))
for col in rf_eod_data.columns:
rf_eod_data[col] = pd.to_numeric(rf_eod_data[col],errors='coerce')
rf_eod_data=rf_eod_data.fillna(method='ffill')
rf_eod_data['interest']=((1+(rf_eod_data['Adj Close']/100))**(1/252))-1
rf_eod_data['annualized_interest']=252*(((1+(rf_eod_data['Adj Close']/100))**(1/252))-1)
rf_value =rf_eod_data['annualized_interest'].iloc[-1]
print("\nCurrent Risk Free Rate is :",'{:.3f}%'.format(rf_value*100))
"""
#######################################################################################
Data Cleaning
#######################################################################################
"""
def wrang_1(df, col_names):
for col in col_names:
df[col] = df[col].str.rstrip('%')
df[col] = pd.to_numeric(df[col],errors='coerce')
df[col] = [float(x)/100.0 for x in df[col].values]
return df
convert_cols = ["Impl Vol", "Prob.ITM","Prob.OTM","Prob.Touch"]
data = wrang_1(data,convert_cols)
def label_type(row):
if row['Symbol'][0] == "." :
return 'Option'
return 'Stock'
data['Type']=data.apply(lambda row: label_type(row), axis=1)
data['Expiry_Date'] = data.Symbol.str.extract(r'(\d+)')
data['Expiry_Date'] = data['Expiry_Date'].apply(lambda x: pd.to_datetime(str(x), format='%y%m%d'))
expiry_date_str = expiry_date.strftime("%Y%m%d")
data['Expiry_Date'] = data['Expiry_Date'].fillna(pd.Timestamp(expiry_date_str))
from pop_finder import locator_mod
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib
import os
import shutil
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report
import itertools
def contour_classifier(
sample_data,
num_contours=10,
run_locator=False,
gen_dat=None,
nboots=50,
return_plots=True,
return_df=True,
save_dir="out",
multi_iter=1,
**kwargs,
):
"""
Wrapper function that runs locator to generate a density of predictions,
then uses contour lines to choose the most likely population.
Parameters
----------
sample_data : string
Filepath to input file containing coordinates and populations of
samples, including individuals from unknown populations.
num_contours : int
Number of contours to generate and search for closest population in
(Default=10).
run_locator : boolean
Run locator and use outputs to generate classifications. If set to
False, then will look in specified save_dir for the *_predlocs.txt
files from a previous locator run. If set to True, ensure that
gen_dat is not None (Default=False).
gen_dat : string
Filepath to input genetic data in VCF format (Default=None).
nboots : int
Number of bootstrap iterations (Default=50).
return_plots : boolean
Return contour plots of prediction densities overlayed with true
population locations (Default=True).
return_df : boolean
If true, saves the results in a csv file in the save_dir folder
(Default=True).
save_dir : string
Folder to save results. Folder should already be in directory. If
using results from multiple iterations, only include prefix (e.g.
'out' for 'out1', 'out2', 'out3'; Default='out').
multi_iter : int
How many times to run locator to get predictions. If the sample size
is small, it is better to run multiple iterations (Default=1).
Returns
-------
class_df : pd.DataFrame
Dataframe containing classifications of samples of unknown origin.
"""
# Check if save_dir exists
if (os.path.isdir(save_dir) is not True and
os.path.isdir(save_dir + "1") is not True):
raise ValueError("save_dir does not exist")
# Check is sample_data path exists
if (isinstance(sample_data, pd.DataFrame) is False and
os.path.exists(sample_data) is False):
raise ValueError("path to sample_data incorrect")
# Make sure hdf5 file is not used as gen_dat
if run_locator is True and gen_dat.endswith('hdf5'):
raise ValueError("Cannot use hdf5 file, please use vcf")
if run_locator is True:
# Check components
if os.path.exists(gen_dat) is not True:
raise ValueError("path to genetic data incorrect")
for i in range(1, multi_iter + 1):
if os.path.exists(save_dir + str(i)):
shutil.rmtree(save_dir + str(i))
os.makedirs(save_dir + str(i))
locator_mod.locator(
sample_data,
gen_dat,
bootstrap=True,
nboots=nboots,
out=save_dir + str(i),
**kwargs,
)
plt.close()
out_list = []
for i in range(1, multi_iter + 1):
for j in range(nboots):
out_list.append(
save_dir + str(i) + "/loc_boot" + str(j) + "_predlocs.txt"
)
with open(out_list[0], "a") as outfile:
for names in out_list[1:]:
with open(names) as infile:
string = ""
outfile.write(string.join(infile.readlines()[1:]))
else:
if multi_iter == 1:
if os.path.isdir(save_dir) is True:
# Check to make sure right number of boots selected
if os.path.exists(
save_dir + "/loc_boot" + str(nboots + 1) + "_predlocs.txt"
):
raise ValueError(
"Number of bootstraps in output directory does not\
match nboots specified"
)
out_list = [
save_dir + "/loc_boot" + str(i) + "_predlocs.txt"
for i in range(nboots)
]
if sum(1 for line in open(out_list[0])) == sum(
1 for line in open(out_list[1])
):
with open(out_list[0], "a") as outfile:
for names in out_list[1:]:
with open(names) as infile:
string = ""
outfile.write(
string.join(
infile.readlines()[1:]
)
)
else:
# Check to make sure right number of boots selected
if os.path.exists(
save_dir + "1/loc_boot" + str(nboots + 1) + "_predlocs.txt"
):
raise ValueError(
"Number of bootstraps in output directory does not\
match nboots specified"
)
out_list = [
save_dir + "1/loc_boot" + str(i) + "_predlocs.txt"
for i in range(nboots)
]
if sum(1 for line in open(out_list[0])) == sum(
1 for line in open(out_list[1])
):
with open(out_list[0], "a") as outfile:
for names in out_list[1:]:
with open(names) as infile:
string = ""
outfile.write(
string.join(
infile.readlines()[1:]
)
)
if multi_iter > 1:
out_list = []
for i in range(1, multi_iter + 1):
for j in range(nboots):
out_list.append(
save_dir+str(i)+"/loc_boot"+str(j)+"_predlocs.txt"
)
if sum(1 for line in open(out_list[0])) == sum(
1 for line in open(out_list[1])
):
with open(out_list[0], "a") as outfile:
for names in out_list[1:]:
with open(names) as infile:
string = ""
outfile.write(string.join(infile.readlines()[1:]))
# Convert input data file (true locs) into correct file for contour wrapper
if isinstance(sample_data, pd.DataFrame) is not True:
true_dat = pd.read_csv(sample_data, sep="\t")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os.path
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from preprocessing.preprocessing import preprocess_whole_pipeline
def load_model(path_model):
bst = xgb.Booster({'nthread': 8}) # init model
bst.load_model(path_model) # load data
return bst
def get_prediction(model, data_test, ID, label, output_path):
dtest = xgb.DMatrix(data_test)
ypred = [max(0,round(value,2)) for value in list(model.predict(dtest))]
res = pd.DataFrame({"drug_id":ID, label:ypred},columns=["drug_id",label])
if output_path != "print":
res.to_csv(path_or_buf=output_path, index=False, float_format='%.2f')
print("Prediction DONE, results available here: " + output_path)
else:
print(res)
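# Usage sketch (paths and variable names are placeholders): score a prepared feature table
# with a saved booster
#     bst = load_model("models/xgb_price_model")
#     get_prediction(bst, X_test, drug_ids, "price", "predictions.csv")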
def create_model(data_train,label,max_depth,eta,num_round,path_model,NaN_imputation_feature_scaling_PCA_boolean,directory_of_script):
'''
Creation of the model using XGBoost
'''
print("Training model using: XGBoost")
df_train, df_test = train_test_split(data_train, test_size=0.2)
dtrain = xgb.DMatrix(df_train.drop(label,axis=1), label=df_train[label])
dtest = xgb.DMatrix(df_test.drop(label,axis=1), label=df_test[label])
evallist = [(dtest, 'eval'), (dtrain, 'train')]
evals_result = {}
param = {'max_depth': max_depth, 'eta': eta, 'objective': 'reg:squarederror'}
param['nthread'] = 8
param['eval_metric'] = 'rmse'
bst = xgb.train(param,
dtrain,
num_round,
evallist,
early_stopping_rounds=10,
evals_result=evals_result)
now = datetime.now()
dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
param_string = 'max_depth_' + str(param['max_depth']) + "_eta_" + str(param['eta']) + "_num_round_" + str(num_round) + "_NaN_imputation_feature_scaling_PCA_usage_" + str(NaN_imputation_feature_scaling_PCA_boolean)
model_name = param_string + "_" + dt_string
bst.save_model(path_model + "_" + model_name)
print("Model is available here: " + path_model + "_" + model_name)
'''
Get the XGBoost model results and information
'''
print("Plotting validation curve")
x_axis = range(len(evals_result['train']['rmse']))
fig, ax = plt.subplots(figsize=(10, 10))
ax.plot(x_axis, evals_result['train']['rmse'], label='Train')
ax.plot(x_axis, evals_result['eval']['rmse'], label='Test')
ax.legend()
plt.ylabel('RMSE')
plt.xlabel('Number of Rounds')
plt.title('XGBoost RMSE')
plt.savefig(os.path.join(directory_of_script,"results","Validation Curve" + "_" + model_name + ".png"))
print("Learning Curve is available here: " + os.path.join(directory_of_script,"results","Validation Curve" + "_" + model_name + ".png"))
ypred = bst.predict(dtest)
RMSE = mean_squared_error(df_test[label], ypred, squared=False)
print("RMSE: %.4f" % RMSE)
print("Check importance of features\n")
fig, ax = plt.subplots(figsize=(100, 100))
ax = xgb.plot_importance(bst,ax=ax)
ax.figure.savefig(os.path.join(directory_of_script,"results","Feature Importance" + "_" + model_name + ".png"))
print("Features Importance is available here: " + os.path.join(directory_of_script,"results","Feature Importance" + "_" + model_name + ".png"))
print("Training DONE")
def main(args):
directory_of_script = os.path.dirname(os.path.realpath(__file__))
path_of_output_prediction = args.path_of_output_prediction
data_directory = args.data_directory
path_model = args.path_model
mode = args.mode
NaN_imputation_feature_scaling_PCA_usage = args.NaN_imputation_feature_scaling_PCA_usage
max_depth = args.max_depth
eta = args.eta
num_round = args.num_round
FILENAME_DRUGS_TRAIN = "drugs_train.csv"
FILENAME_DRUGS_TEST = "drugs_test.csv"
FILENAME_FEATURE_ENG = "drug_label_feature_eng.csv"
FILENAME_ACTIVE_INGREDIENTS = "active_ingredients.csv"
FEATURE_TO_DUMMY = ["administrative_status",
"marketing_status",
"approved_for_hospital_use",
"dosage_form",
"marketing_authorization_status",
"marketing_authorization_process",
"pharmaceutical_companies"]
LABEL = "price"
path_drugs_train = os.path.join(data_directory,FILENAME_DRUGS_TRAIN)
path_feature_eng = os.path.join(data_directory,FILENAME_FEATURE_ENG)
path_active_ingredients = os.path.join(data_directory,FILENAME_ACTIVE_INGREDIENTS)
path_drugs_test = args.path_of_data_to_predict
if path_drugs_test is None : path_drugs_test = os.path.join(data_directory,FILENAME_DRUGS_TEST)
'''
Reading files
'''
print("Reading files...")
df_drugs_train = pd.read_csv(path_drugs_train, sep=",")
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import model_from_json
import os.path
from tensorflow import keras
import streamlit as st
import numpy as np
from app import days
# db_connection = sql.connect(host='localhost', database='timeseries', user='root', password='')
# test_data = pd.read_sql('SELECT * FROM btc_usdt', con=db_connection)
crypto_currency = 'BTC'
base_currency = 'USD'
prediction_days = 60
def get_train_data():
n_days = days
data = pd.read_csv('./data/btc_usdt.csv', header=None)
data.columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
data['Date'] = pd.to_datetime(data['Date'])
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data['Close'].values.reshape(-1,1))
x_train, y_train = [], []
if n_days > 1:
for x in range(prediction_days, len(scaled_data)-n_days):
x_train.append(scaled_data[x-prediction_days:x, 0])
y_train.append(scaled_data[x+n_days, 0])
else:
for x in range(prediction_days, len(scaled_data)):
x_train.append(scaled_data[x-prediction_days:x, 0])
y_train.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1],1))
return data, scaler, x_train, y_train
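# Shape sketch: with prediction_days = 60, each x_train[i] holds the 60 previous scaled
# closes and y_train[i] the scaled close n_days ahead, so after the reshape above
# x_train has shape (n_samples, 60, 1) -- the usual (samples, timesteps, features)
# layout expected by an LSTM.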
def get_test_data():
data = get_train_data()[0]
scaler = MinMaxScaler(feature_range=(0,1))
test_start = "2018-01-01"
test_data = pd.read_csv('./data/btc_usdt.csv', header=None)
test_data.columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
test_data['Date'] = pd.to_datetime(test_data['Date'])
test_data = test_data.loc[test_data['Date']>=test_start]
actual_values = test_data['Close'].values
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(actual_values.reshape(-1,1))
total_data = pd.concat((data['Close'], test_data['Close']), axis=0)
import os
import re
import warnings
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder, StandardScaler
from Logging.logging import Logger
from Prediction_data_ingestion.data_loading_prediction import DataGetterPrediction
warnings.simplefilter(action='ignore', category=FutureWarning)
class PreprocessingMethodsPrediction:
"""
Description: This class will contain the methods which will be used for the data transformation
before the date clustering and model training
Written By: <NAME>
Version: 1.0
Revision: None
"""
def __init__(self):
self.logger_obj = Logger()
self.file_object = open("PredictionLogs/preprocessingLogs.txt", "a+")
self.df = DataGetterPrediction(self.file_object, self.logger_obj).getData().copy()
def removeUnnecessaryFeatureColumn(self, column_name):
"""
Description: This method is used to remove any unnecessary columns from the dataframe
Written By: <NAME>
Version: 1.0
Revision: None
:param column_name: Name of column that needs to be removed from the dataframe
:return: None
"""
try:
# removing the given column from the dataframe
self.df.drop(columns=[column_name], inplace=True)
self.logger_obj.log(self.file_object, f"Feature column named {column_name} removed from the dataframe")
except Exception as e:
self.logger_obj.log(self.file_object,
f"Exception occurred while removing a feature column {column_name} from the "
f"dataframe. Exception: {str(e)} ")
raise e
def datatypeToDatetime(self, column_name):
"""
Description: This method is used to change the datatype of a column (Date_of_Journey) to datetime
Written By: <NAME>
Version: 1.0
Revision: None
:param column_name: Name of the column for which the datatype needs to be changed to datetime
:return: None
"""
try:
# converting the datatype of provided column into datetime
self.df[column_name] = pd.to_datetime(self.df[column_name], format="%d/%m/%Y")
self.logger_obj.log(self.file_object,
f"Datatype of feature column named {column_name} changed to datetime..")
except Exception as e:
self.logger_obj.log(self.file_object,
f"Exception occurred while changing the datatype of column {column_name} to datetime. "
f"Exception: {str(e)}")
raise e
def splittingDatetimeColumnIntoThree(self, column_name):
"""
Description: This method is used to create three new columns from the datetime column from the dataframe.
Method also removes the original datetime column after creating three new columns.
Written By: <NAME>
Version: 1.0
Revision: None
:param column_name: Name of datetime column that needs to split into 3 columns
:return: None
"""
try:
# splitting the Date_of_Journey which contains the whole data into three new columns each of those showing
# day, month and year of the date
self.df['Day_of_Journey'] = self.df['Date_of_Journey'].dt.day
self.df['Month_of_Journey'] = self.df['Date_of_Journey'].dt.month
self.df['Year_of_Journey'] = self.df['Date_of_Journey'].dt.year
self.logger_obj.log(self.file_object, "Successfully split the datetime column into three newly created "
"columns namely Day_of_Journey, Month_of_Journey and Year_of_Journey")
# removing the original Date_of_Journey column from the dataframe
self.removeUnnecessaryFeatureColumn("Date_of_Journey")
except Exception as e:
self.logger_obj.log(self.file_object,
f"Exception occurred while splitting the datetime column into three newly created "
f"columns. Exception: {str(e)}")
raise e
def convertDurationIntoMinutes(self):
"""
Description: This method is used to create a new column named Flight_Duration which contains the flight duration
in minutes from the already present Duration column which has duration in the hours and minutes. Function also
removes the original Duration column after removing the Flight_Duration column
Written By: <NAME>
Version: 1.0
Revision: None
:return: None
"""
try:
# creating the three possible patterns for the values in the feature column containing duration in hours and
# minutes
pattern1 = re.compile(r"(\d+)(h|m)(\s)(\d*)(h|m)*")
pattern2 = re.compile(r"(\s*)(\d+)(h)")
pattern3 = re.compile(r"(\s*)(\d+)(m)")
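# Worked examples of the three patterns (illustrative values):
#   "2h 50m" matches pattern1 -> 60*2 + 50 = 170 minutes
#   "19h"    matches pattern2 -> 60*19    = 1140 minutes
#   "45m"    matches pattern3 ->            45 minutes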
# creating an empty list which will be used to store the flight duration in minutes for every data
# observation
min_lst = []
# calculating the flight duration in minutes for each data observation and then adding it to the created
# empty list
for i in range(self.df.shape[0]):
if 'h' in self.df.loc[i, "Duration"] and 'm' in self.df.loc[i, "Duration"]:
matchobj = re.match(pattern1, self.df.loc[i, "Duration"])
hour = int(matchobj.group(1))
minute = int(matchobj.group(4))
total_min = 60 * hour + minute
min_lst.append(total_min)
elif 'h' in self.df.loc[i, "Duration"] and 'm' not in self.df.loc[i, "Duration"]:
matchobj = re.match(pattern2, self.df.loc[i, "Duration"])
hour = int(matchobj.group(2))
min_lst.append(60 * hour)
elif 'h' not in self.df.loc[i, "Duration"] and 'm' in self.df.loc[i, "Duration"]:
matchobj = re.match(pattern3, self.df.loc[i, "Duration"])
minute = int(matchobj.group(2))
min_lst.append(minute)
else:
min_lst.append(self.df.loc[i, "Duration"])
# removing the original Duration column which contains the flight duration in hours and minutes
self.removeUnnecessaryFeatureColumn("Duration")
# adding a new duration column which contains the flight duration in minutes to the dataframe
train_values = pd.Series(min_lst)
# Copyright ยฉ 2019 <NAME>
"""
Test for the ``preprocess._aggregate_columns._difference`` module.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...clean_variables import VariableCleaner
class PreprocessConstantDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module. Assert final data frames match expectations.
"""
@staticmethod
def test_clean_difference_ints_0():
"""Test subtracting 0 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [1, 2, 3]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_ints_1():
"""Test subtracting 1 from a column."""
_input = DataFrame({"A": [1, 2, 3]})
_expected = DataFrame({"A": [0, 1, 2]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 1}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_0():
"""Test subtracting 0.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [1.0, 2.0, 3.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": 0.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_floats_negative_1():
"""Test subtracting -1.0 from a column."""
_input = DataFrame({"A": [1.0, 2.0, 3.0]})
_expected = DataFrame({"A": [2.0, 3.0, 4.0]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": -1.0}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
class PreprocessVariableDifferenceTests(unittest.TestCase):
"""
Tests for the ``preprocess._aggregate_columns._difference`` module with column subtraction.
"""
@staticmethod
def test_clean_difference_int_column():
"""Test subtracting the right column from the left."""
_input = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]})
_expected = DataFrame({"A": [-1, -1, -1], "B": [2, 3, 4]})
_groupings = [{"operator": "difference", "columns": ["A"], "value": "B"}]
_vc = VariableCleaner(_input)
_vc.clean(_groupings)
assert_frame_equal(_expected, _vc.frame)
@staticmethod
def test_clean_difference_right_string_column():
"""Test subtracting the right column from the left. Right column has strings."""
_input = DataFrame({"A": [1, 2, 3], "B": ["2", "3", "4"]})
"""
The ``risk_models`` module provides functions for estimating the covariance matrix given
historical returns. Because of the complexity of estimating covariance matrices
(and the importance of efficient computations), this module mostly provides a convenient
wrapper around the underrated `sklearn.covariance` module.
The format of the data input is the same as that in :ref:`expected-returns`.
**Currently implemented:**
- sample covariance
- semicovariance
- exponentially weighted covariance
- mininum covariance determinant
- shrunk covariance matrices:
- manual shrinkage
- Ledoit Wolf shrinkage
- Oracle Approximating shrinkage
- covariance to correlation matrix
"""
import warnings
import numpy as np
import pandas as pd
import sklearn.covariance
from .expected_returns import returns_from_prices
def sample_cov(prices, frequency=252):
"""
Calculate the annualised sample covariance matrix of (daily) asset returns.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised sample covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
return daily_returns.cov() * frequency
def semicovariance(prices, benchmark=0.000079, frequency=252):
"""
Estimate the semicovariance matrix, i.e the covariance given that
the returns are less than the benchmark.
.. math:: \text{semicov}_{ij} = E[\min(r_i - B, 0) \cdot \min(r_j - B, 0)]
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param benchmark: the benchmark return, defaults to the daily risk-free rate, i.e
:math:`1.02^{(1/252)} -1`.
:type benchmark: float
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year). Ensure that you use the appropriate
benchmark, e.g if ``frequency=12`` use the monthly risk-free rate.
:type frequency: int, optional
:return: semicovariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
daily_returns = returns_from_prices(prices)
drops = np.fmin(daily_returns - benchmark, 0)
return drops.cov() * frequency
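# Usage sketch (assumes a DataFrame of adjusted closing prices, e.g. read from a CSV;
# the file name is a placeholder):
#     prices = pd.read_csv("prices.csv", parse_dates=True, index_col="date")
#     S = sample_cov(prices)           # annualised sample covariance
#     S_down = semicovariance(prices)  # downside-only (semi)covariance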
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean().iloc[-1]
def exp_cov(prices, span=180, frequency=252):
"""
Estimate the exponentially-weighted covariance matrix, which gives
greater weight to more recent data.
:param prices: adjusted closing prices of the asset, each row is a date
and each column is a ticker/id.
:type prices: pd.DataFrame
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:param frequency: number of time periods in a year, defaults to 252 (the number
of trading days in a year)
:type frequency: int, optional
:return: annualised estimate of exponential covariance matrix
:rtype: pd.DataFrame
"""
if not isinstance(prices, pd.DataFrame):
warnings.warn("prices are not in a dataframe", RuntimeWarning)
prices = pd.DataFrame(prices)
assets = prices.columns
daily_returns = returns_from_prices(prices)
N = len(assets)
# Loop over matrix, filling entries with the pairwise exp cov
S = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
S[i, j] = S[j, i] = _pair_exp_cov(
daily_returns.iloc[:, i], daily_returns.iloc[:, j], span
)
return pd.DataFrame(S * frequency, columns=assets, index=assets)
from torch.utils.data import Dataset
from cmapPy.pandasGEXpress.parse import parse
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import numpy as np
import os
import ai.causalcell.utils.register as register
from rdkit import Chem
import pandas as pd
from rdkit.Chem import AllChem
import random
import pickle
import time
paths_to_L1000_files = {
"phase1": {
"path_to_dir": "Data/L1000_PhaseI",
"path_to_data": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_"
"n473647x12328.gctx",
"path_to_sig_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_sig_info.txt",
"path_to_gene_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_gene_info.txt",
"path_to_pert_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_pert_info.txt",
"path_to_cell_info": "Data/L1000_PhaseI/GSE92742_Broad_LINCS/GSE92742_Broad_LINCS_cell_info.txt"
}, "phase2": {
"path_to_dir": "Data/L1000_PhaseII",
"path_to_data": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/Level5_COMPZ_n118050x12328_2017-03-06.gctx",
"path_to_sig_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/sig_info_2017-03-06.txt",
"path_to_gene_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/gene_info_2017-03-06.txt",
"path_to_pert_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/pert_info_2017-03-06.txt",
"path_to_cell_info": "Data/L1000_PhaseII/GSE70138_Broad_LINCS/cell_info_2017-04-28.txt"
}}
"""Dict of l1000 datasets that will be initialized with the relevant dataset objects if necessary so that all
dataloaders use the same dataset object when possible, in order to instantiate only one."""
dict_of_l1000_datasets = {}
def get_fingerprint(smile, radius, nBits):
try:
if smile == "-666" or smile == "restricted":
return None
return np.array(AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(smile), radius, nBits))
except:
return None
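# Usage sketch (aspirin SMILES used purely as an illustration):
#     fp = get_fingerprint("CC(=O)OC1=CC=CC=C1C(=O)O", radius=2, nBits=1024)
#     # fp is a length-1024 numpy array of 0/1 Morgan-fingerprint bits,
#     # or None if the SMILES is missing/unparsable.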
def get_concentration(s):
if s.endswith('µM') or s.endswith('um'):
return float(s[:-3])
if s.endswith('nM'):
return 0.001 * float(s[:-3])
return -1
def get_time(s):
return float(s[:-2])
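# Illustrative conversions (assuming dose strings like "10 µM" / "500 nM" and
# exposure times like "24 h", which may differ from the raw L1000 metadata):
#     get_concentration("10 µM")  -> 10.0
#     get_concentration("500 nM") -> 0.5
#     get_time("24 h")            -> 24.0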
########################################################################################################################
# L1000 dataset
########################################################################################################################
class L1000Dataset(Dataset):
"""
Information on the dataset can be found here:
https://docs.google.com/document/d/1q2gciWRhVCAAnlvF2iRLuJ7whrGP6QjpsCMq1yWz7dU/edit#heading=h.usef9o7fuux3
"""
def __init__(self, phase="phase2", radius=2, nBits=1024):
"""
:param phase: which data release to use: "phase1", "phase2", or "both"
:param radius: parameter for fingerprints https://www.macinchem.org/reviews/clustering/clustering.php
:param nBits: desired length of fingerprints
"""
assert phase in ["phase1", "phase2", "both"]
self.both = (phase == "both")
if phase == "both":
self.phase = "phase1"
else:
self.phase = phase
# fingerprint parameters
self.radius = radius
self.nBits = nBits
# Load metadata
self.cell_info, self.landmark_gene_list, self.pert_info = self.load_metadata()
self.sig_info = self.build_environment_representation()
# Load data
if self.both:
self.data = pd.concat([self.load_data("phase1"), self.load_data("phase2")], sort=False)
else:
self.data = self.load_data(phase)
# Get dictionary of non empty environments
self.env_dict = self.get_env_dict()
def load_data(self, phase):
"""
We store the data as a single column dataframe containing numpy arrays.
It allows a considerable speedup when accessing rows during training
"""
path_to_data = paths_to_L1000_files[phase]["path_to_data"]
df_path = os.path.join(paths_to_L1000_files[phase]["path_to_dir"], "dataframe.pkl")
if os.path.isfile(df_path):
print("Loading data of", phase)
pickle_in = open(df_path, "rb")
data = pickle.load(pickle_in)
else: # If the data has not been saved yet, parse the original file and save dataframe
print("Parsing original data, only happens the first time...")
data = parse(path_to_data, rid=self.landmark_gene_list).data_df.T
# Ensure that the order of columns corresponds to landmark_gene_list
data = data[self.landmark_gene_list]
# Remove rows that are not in sig_info
data = data[data.index.isin(self.sig_info.index)]
# Transform data so that it only has one column
d = pd.DataFrame(index=data.index, columns=["gene_expr"])
d["gene_expr"] = list(data.to_numpy())
data = d
# Save data
pickle_out = open(df_path, "wb")
pickle.dump(data, pickle_out, protocol=2)
pickle_out.close()
return data
def get_env_dict(self):
"""
:return: dict with (pert, cell) keys corresponding to non empty environments
dict[(pert, cell)] contains the list of all corresponding sig_ids
"""
dict_path = os.path.join(paths_to_L1000_files[self.phase]["path_to_dir"], "dict_" + str(self.both) + ".npy")
if os.path.isfile(dict_path): # if the dict has been saved previously, load it
env_dict = np.load(dict_path, allow_pickle='TRUE').item()
else:
print("Building dict of all environments, only happens the first time...")
env_dict = {}
for index, row in self.data.iterrows():
pert = self.sig_info.pert_id.loc[index]
cell = self.sig_info.cell_id.loc[index]
if (pert, cell) in env_dict.keys():
env_dict[(pert, cell)].append(index)
else:
env_dict[(pert, cell)] = [index]
np.save(dict_path, env_dict)
return env_dict
def load_metadata(self):
# cell_info and gene_info files are the same for both phases
cell_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_cell_info"],
sep="\t", index_col="cell_id")
cell_info['cell_id'] = cell_info.index # Store cell_id in a column
# Get list of landmark genes
gene_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_gene_info"], sep="\t")
landmark_gene_list = gene_info[gene_info['pr_is_lm'] == 1]["pr_gene_id"].astype(str)
# Load pert_info
pert_info = pd.read_csv(paths_to_L1000_files[self.phase]["path_to_pert_info"], sep="\t",
index_col="pert_id", usecols=["pert_id", "canonical_smiles"])
if self.both: # If we want both phases, load the other phase as well
pert_info_2 = pd.read_csv(paths_to_L1000_files["phase2"]["path_to_pert_info"], sep="\t",
index_col="pert_id", usecols=["pert_id", "canonical_smiles"])
pert_info = pd.concat([pert_info, pert_info_2])
# Remove duplicate indices
pert_info = pert_info.loc[~pert_info.index.duplicated(keep='first')]
# Load fingerprints
fps_path = os.path.join(paths_to_L1000_files[self.phase]["path_to_dir"], "fingerprints_" + str(self.radius)
+ "_" + str(self.nBits) + "_" + str(self.both) + ".pkl")
if os.path.isfile(fps_path):
fps = pd.read_pickle(fps_path)
import pandas as pd
import numpy as np
from brightwind.transform import transform as tf
from brightwind.utils import utils
from brightwind.analyse import plot as plt
from brightwind.utils.utils import _convert_df_to_series
import matplotlib
__all__ = ['monthly_means',
'momm',
'dist',
'dist_matrix',
'dist_of_wind_speed',
'dist_by_dir_sector',
'dist_matrix_by_dir_sector',
'dist_12x24',
'freq_distribution',
'freq_table',
'time_continuity_gaps',
'coverage',
'basic_stats',
'TI',
'sector_ratio',
'calc_air_density']
def dist_matrix(var_series, x_series, y_series,
num_bins_x=None, num_bins_y=None,
x_bins=None, y_bins=None,
x_bin_labels=None, y_bin_labels=None,
var_label=None, x_label=None, y_label=None,
aggregation_method='%frequency',
return_data=False):
"""
Calculates the distribution of a variable against two other variables, on an X-Y plane, returning a heat map.
By default, the X and Y variables are binned in bins of 1. However, this behaviour can be modified by the user.
:param var_series: Time-series of the variable whose distribution we need to find.
:type var_series: pandas.Series
:param x_series: Time-series of the X variable which we want to bin against, forms columns of distribution.
:type x_series: pandas.Series
:param y_series: Time-series of the Y variable which we want to bin against, forms rows of distribution.
:type y_series: pandas.Series
:param num_bins_x: Number of evenly spaced bins to use for x_series. If this and x_bins are not specified, bins
of width 1 are used.
:type num_bins_x: int
:param num_bins_y: Number of evenly spaced bins to use for y_series. If this and y_bins are not specified, bins
of width 1 are used.
:type num_bins_y: int
:param x_bins: (optional) Array of numbers where adjacent elements of array form a bin. Overwrites num_bins_x.
If set to None derives the min and max from the x_series series and creates evenly spaced number of
bins specified by num_bins_x.
:type x_bins: list, array, None
:param y_bins: (optional) Array of numbers where adjacent elements of array form a bin. Overwrites num_bins_y.
If set to None derives the min and max from the y_series series and creates evenly spaced number of
bins specified by num_bins_y.
:type y_bins: list, array, None
:param x_bin_labels: (optional) Labels of bins to be used for x_series, uses (bin-start, bin-end] format by
default.
:type x_bin_labels:list
:param y_bin_labels: (optional) Labels of bins to be used for y_series, uses (bin-start, bin-end] format by
default.
:type y_bin_labels: list
:param var_label: (Optional) Label to use for variable distributed, by default name of the var_series is used.
:type var_label: str
:param x_label: (Optional) Label to use for x_label of heat map, by default name of the x_series is used.
:type x_label: str
:param y_label: (Optional) Label to use for y_label of heat map, by default name of the y_series is used.
:type y_label: str
:param aggregation_method: Statistical method used to find distribution. It can be mean, max, min, std, count,
%frequency or a custom function. Computes frequency in percentages by default.
:type aggregation_method: str or function
:param return_data: If True data is also returned with a plot.
:return: A heat map and a distribution matrix if return_data is True, otherwise just a heat map.
**Example usage**
::
import brightwind as bw
df = bw.load_csv(r'C:\\Users\\Stephen\\Documents\\Analysis\\demo_data.csv')
# For distribution of mean wind speed standard deviation against wind speed and temperature
bw.dist_matrix(df.Spd40mNStd, x_series=df.T2m, y_series=df.Spd40mN, aggregation_method='mean')
# To change the number of bins
bw.dist_matrix(df.Spd40mNStd, x_series=df.T2m, y_series=df.Spd40mN, num_bins_x=4, num_bins_y=10)
# To specify custom bins
bw.dist_matrix(df.Spd40mNStd, x_series=df.T2m, y_series=df.Spd40mN,
y_bins=[0,6,12, 15, 41], y_bin_labels=['low wind', 'medium wind', 'gale', 'storm'],
aggregation_method='min', return_data=True)
# For custom aggregation function
def custom_agg(x):
return x.mean()+(2*x.std())
data = bw.dist_matrix(df.Spd40mNStd, x_series=df.T2m, y_series=df.Spd40mN,
aggregation_method=custom_agg, return_data=True)
"""
var_series = _convert_df_to_series(var_series).dropna()
y_series = _convert_df_to_series(y_series).dropna()
x_series = _convert_df_to_series(x_series).dropna()
if x_label is not None:
x_series.name = x_label
if y_label is not None:
y_series.name = y_label
if var_series.name is None:
var_series.name = 'var_series'
if y_series.name is None:
y_series.name = 'binned_var_1'
if x_series.name is None:
x_series.name = 'binned_var_2'
if var_label is None:
var_label = (aggregation_method.capitalize() if isinstance(aggregation_method, str) else getattr(aggregation_method, "__name__", "Aggregation")) + ' of ' + var_series.name
var_series.name = var_label
if x_series.name == var_series.name:
x_series.name = x_series.name+"_binned"
if y_series.name == var_series.name:
y_series.name = y_series.name+"_binned"
if num_bins_x is None and x_bins is None:
x_bins = np.arange(int(np.floor(x_series.min())), int(np.ceil(x_series.max()) + 1 + (x_series.max() % 1 == 0)),
1)
elif num_bins_x is not None and x_bins is None:
x_bins = np.linspace(x_series.min(), x_series.max(), num_bins_x + 1)
elif x_bins is not None:
x_bins = x_bins
if num_bins_y is None and y_bins is None:
y_bins = np.arange(int(np.floor(y_series.min())), int(np.ceil(y_series.max()) + 1 + (y_series.max() % 1 == 0)),
1)
elif num_bins_y is not None and y_bins is None:
y_bins = np.linspace(y_series.min(), y_series.max(), num_bins_y + 1)
elif y_bins is not None:
y_bins = y_bins
var_binned_series_1 = pd.cut(y_series, y_bins, right=False)
import gzip
import re
import tempfile
from collections import defaultdict
import numpy as np
import pandas as pd
from .. import access
# minimum number of valid samples per
# splicing event for ccle_splicing()
MIN_VALID_COUNT = 100
# minimum standard deviation per splicing
# event for ccle_splicing()
MIN_STDEV = 0.01
class Processors:
"""
CCLE dataset processors.
"""
@staticmethod
def ccle_annotations(raw_path):
"""
Process CCLE cell line annotations.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t")
df = df.astype(str)
return df
@staticmethod
def ccle_chromatin(raw_path):
"""
Process CCLE chromatin profiling estimates.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, index_col=1)
df = df.iloc[:, 1:]
return df
@staticmethod
def ccle_translocations_svaba(raw_path):
"""
Process CCLE SvABA calls.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_excel(raw_path)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
string_cols = [
"CCLE_name",
"map_id",
"bp1",
"bp2",
"class",
"gene1",
"gene2",
"site1",
"site2",
"fusion",
]
for col in string_cols:
df[col] = df[col].astype(str)
df["depmap_id"] = df["CCLE_name"].apply(ccle_to_depmap.get)
return df
@staticmethod
def ccle_rppa_info(raw_path):
"""
Process CCLE RPPA antibody info.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path)
df = df.astype(str)
df["format_id"] = (
df["Target_Genes"].apply(lambda x: x.replace(" ", "-"))
+ "_"
+ df["Antibody_Name"]
)
return df
@staticmethod
def ccle_rppa(raw_path):
"""
Process CCLE RPPA values.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, index_col=0)
ccle_rppa_info = access.load("ccle_rppa_info")
antibody_name_map = dict(
zip(ccle_rppa_info["Antibody_Name"], ccle_rppa_info["format_id"])
)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.columns = map(antibody_name_map.get, df.columns)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_gene_tpm(raw_path):
"""
Process CCLE gene TPM measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t", index_col=0)
df = df.iloc[:, 1:]
g19_7_definitions = access.load("g19_7_definitions")
gene_name_map = dict(
zip(g19_7_definitions["gene_id"], g19_7_definitions["gene_name"])
)
gene_name_map = defaultdict(str, gene_name_map)
gene_names = df.index.map(lambda x: f"{gene_name_map.get(x)}_{x}")
df = df.set_index(gene_names)
df = np.log2(df + 1)
df = df.astype(np.float16)
df = df.T
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_transcript_tpm(raw_path):
"""
Process CCLE transcript TPM measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t")
g19_7_definitions = access.load("g19_7_definitions")
gene_name_map = dict(
zip(g19_7_definitions["gene_id"], g19_7_definitions["gene_name"])
)
gene_name_map = defaultdict(str, gene_name_map)
df.index = df[["gene_id", "transcript_id"]].apply(
lambda x: f"{gene_name_map.get(x['gene_id'])}_{x['transcript_id']}", axis=1
)
df = df.drop(["gene_id", "transcript_id"], axis=1)
df = np.log2(df + 1)
df = df.astype(np.float16)
df = df.T
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_exonusage(raw_path):
"""
Process CCLE splicing measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
def reorder_exon(exon):
"""
Helper function for formatting exon
splicing IDs.
Args:
exon (str): exon identifier
Returns:
Processed DataFrame
"""
exon_split = exon.split("_")
return "_".join(exon_split[3:]) + "_" + "_".join(exon_split[:3])
temp = tempfile.NamedTemporaryFile(mode="wb")
with gzip.open(raw_path, "rb") as f:
for line in f:
                # normalise padded "NA" fields to "nan"; raw byte patterns avoid invalid-escape warnings
                line = re.sub(rb"[^\S\t\n\r]+NA\t", b"nan\t", line)
                line = re.sub(rb"[^\S\t\n\r]+NA\n", b"nan\n", line)
temp.write(line)
df = pd.read_csv(temp.name, skiprows=2, index_col=0, sep="\t")
temp.close()
df.index = df.index.map(reorder_exon)
df.index = pd.Series(df.index, index=df.index) + "_" + pd.Series(df["gene_id"])
df = df.iloc[:, 1:]
df = df.T
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
exonusage_nans = df.isna().sum(axis=0)
keep_cols = df.columns[exonusage_nans < len(df) - MIN_VALID_COUNT]
df = df[keep_cols]
stdevs = df.std(axis=0)
df = df[df.columns[stdevs >= MIN_STDEV]]
df = df.astype(np.float16)
return df
@staticmethod
def ccle_mirna(raw_path):
"""
Process CCLE miRNA measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t", skiprows=2)
df.index = df["Description"] + "_" + df["Name"].apply(lambda x: x[1:])
df = df.iloc[:, 2:]
df = np.log2(df)
df = df.T
df = df.astype(np.float16)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_rrbs_tss1kb(raw_path):
"""
Process CCLE RRBS TSS 1kb measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t", index_col=0)
df = df.iloc[:-1, 2:]
df = df.T
df[df == "\tNA"] = np.nan
df[df == " NA"] = np.nan
df[df == " NA"] = np.nan
df = df.astype(np.float16)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_rrbs_tss_clusters(raw_path):
"""
Process CCLE RRBS TSS cluster measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t", index_col=0)
df = df.iloc[:-1, 2:]
df = df.T
df[df == "\tNA"] = np.nan
df[df == " NA"] = np.nan
df[df == " NA"] = np.nan
df = df.astype(np.float16)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_rrbs_cgi_clusters(raw_path):
"""
Process CCLE RRBS CGI cluster measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t", index_col=0)
df = df.iloc[:-1]
df["cluster_pos"] = df.index
df["cluster_n"] = df.groupby("cluster_pos").cumcount() + 1
df.index = df["cluster_pos"].astype(str) + "-" + df["cluster_n"].astype(str)
df = df.iloc[:, 2:-2]
df = df.T
df[df == "\tNA"] = np.nan
df[df == " NA"] = np.nan
df[df == " NA"] = np.nan
df = df.astype(np.float16)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_rrbs_enhancer_clusters(raw_path):
"""
Process CCLE RRBS enhancer cluster measurements.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_csv(raw_path, sep="\t", index_col=0)
df.index = df.index + "_" + df.groupby(level=0).cumcount().astype(str)
df = df.iloc[:, 2:]
df.index = df.index.map(lambda x: x.replace("_", "-")) + "_enh"
df = df.T
df[df == "\tNA"] = np.nan
df[df == " NA"] = np.nan
df[df == " NA"] = np.nan
df = df.astype(np.float16)
ccle_annotations = access.load("ccle_annotations")
ccle_to_depmap = dict(
zip(ccle_annotations["CCLE_ID"], ccle_annotations["depMapID"])
)
df.index = df.index.map(ccle_to_depmap.get)
return df
@staticmethod
def ccle_tertp(raw_path):
"""
Process CCLE TERT promoter calls.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
df = pd.read_excel(raw_path, skiprows=4)
df = df.set_index("depMapID")
df["TERTp_mut"] = df["TERT_promoter_mutation"] != "wildtype"
return df
@staticmethod
def ccle_msi(raw_path):
"""
Process CCLE MSI calls.
Args:
raw_path (str): the complete path to the
raw downloaded file
Returns:
Processed DataFrame
"""
        df = pd.read_excel(raw_path, sheet_name="MSI calls")
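# A small illustrative sketch of the remapping pattern repeated in the processors
# above: build a CCLE_ID -> depMapID lookup with zip() and apply it to an index.
# The IDs and values below are made up, not taken from the CCLE annotation file.
import pandas as pd
annotations = pd.DataFrame({
    "CCLE_ID": ["LINE1_TISSUE", "LINE2_TISSUE"],
    "depMapID": ["ACH-000001", "ACH-000002"],
})
ccle_to_depmap = dict(zip(annotations["CCLE_ID"], annotations["depMapID"]))
values = pd.DataFrame({"feature": [1.0, 2.0]}, index=["LINE1_TISSUE", "UNKNOWN_LINE"])
values.index = values.index.map(ccle_to_depmap.get)  # unmapped IDs become None
print(values)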
import itertools
import numpy as np
import cantera as ct
import pandas as pd
import re
import pickle
from .. import simulation as sim
from ...cti_core import cti_processor as ctp
class shockTube(sim.Simulation):
def __init__(self,pressure:float,temperature:float,observables:list,
kineticSens:int,physicalSens:int,conditions:dict,
initialTime,finalTime,thermalBoundary,mechanicalBoundary,
processor:ctp.Processor=None,cti_path="",save_timeHistories:int=0,
save_physSensHistories=0,moleFractionObservables:list=[],
absorbanceObservables:list=[],concentrationObservables:list=[],
fullParsedYamlFile:dict={},
time_shift_value:float = 0, atol:float=1e-15, rtol:float=1e-9,
rtol_sens:float=0.0001,
atol_sens:float=1e-6):
'''
Child class pertaining to shock tube simulations. Inherits all
attributes and methods from simulations class including __init__().
Also has its own internal init method due to additional data
requirements.
Parameters
----------
pressure : float
Pressure in [atm].
temperature : float
Temperature in [K].
observables : list
Species which sensitivity analysis is performed for.
kineticSens : int
0 for off, 1 for on.
physicalSens : int
0 for off, 1 for on.
conditions : dict
Initial mole fractions for species in simulation.
initialTime : float
Time to begin simulation from (s).
finalTime : float
Time to end simulation (s).
thermalBoundary : str
Thermal boundary condition inside the reactor. Shock tubes can
either be adiabatic or isothermal.
        mechanicalBoundary : str
            Mechanical boundary condition inside the reactor. Shock tubes can
either be constant pressure or constant volume.
processor : ctp.Processor, optional
Loaded cti file. The default is None.
cti_path : str, optional
Path of cti file for running. If processor is provided this is not
needed. The default is "".
save_timeHistories : int, optional
Boolean variable describing if time histories for simulation runs
are saved. 0 for not saved, 1 for saved. The default is 0.
        save_physSensHistories : int, optional
Boolean variable describing if physical sensitivity time histories
are saved. 0 for not saved, 1 for saved. The default is 0.
moleFractionObservables : list, optional
Species for which experimental data in the form of mole fraction
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
absorbanceObservables : list, optional
Species for which experimental data in the form of summed
absorption time histories (from some or all of the species) will be
provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
concentrationObservables : list, optional
Species for which experimental data in the form of concentration
time histories will be provided for optimization.
Kinetic sensitivities are calculated for all these species.
The default is [].
fullParsedYamlFile : dict, optional
Full dictionary from the parsed shock tube yaml file.
The default is {}.
time_shift_value : float, optional
The numerical value by which the time vector of the simulation
is shifted in seconds. The default is 0.
atol : float, optional
Get the absolute error tolerance. The default is 1e-15.
rtol : float, optional
Get the relative error tolerance. The default is 1e-9.
rtol_sens : float, optional
Scalar relative error tolerance for sensitivity. The default is 0.0001.
atol_sens : float, optional
Scalar absolute error tolerance for sensitivity. The default is 1e-6.
Returns
-------
None.
'''
sim.Simulation.__init__(self,pressure,temperature,observables,kineticSens,physicalSens,
conditions,processor,cti_path)
self.initialTime = initialTime
self.finalTime = finalTime
self.thermalBoundary = thermalBoundary
self.mechanicalBoundary = mechanicalBoundary
self.kineticSensitivities= None
self.timeHistory = None
self.experimentalData = None
self.concentrationObservables = concentrationObservables
self.moleFractionObservables = moleFractionObservables
self.absorbanceObservables = absorbanceObservables
self.fullParsedYamlFile = fullParsedYamlFile
self.time_shift_value = time_shift_value
if save_timeHistories == 1:
self.timeHistories=[]
self.timeHistoryInterpToExperiment = None
self.pressureAndTemperatureToExperiment = None
else:
self.timeHistories=None
if save_physSensHistories == 1:
self.physSensHistories = []
self.setTPX()
self.dk = [0]
self.atol=atol
self.rtol=rtol
self.rtol_sensitivity=rtol_sens
self.atol_sensitivity=atol_sens
def printVars(self):
'''
Prints variables associated with the reactor initial conditions.
'''
print('initial time: {0}\nfinal time: {1}\n'.format(self.initialTime,self.finalTime),
'\nthermalBoundary: {0}\nmechanicalBoundary: {1}'.format(self.thermalBoundary,self.mechanicalBoundary),
'\npressure: {0}\ntemperature: {1}\nobservables: {2}'.format(self.pressure,self.temperature,self.observables),
'\nkineticSens: {0}\nphysicalSens: {1}'.format(self.kineticSens,self.physicalSens),
'\nTPX: {0}'.format(self.processor.solution.TPX)
)
#maybe unify paths with cti file?, also really fix the python styling
def write_time_histories(self, path=''):
if self.timeHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
if path=='':
path = './time_histories.time'
pickle.dump(self.timeHistories,open(path,'wb'))
return 0
def write_physSensHistories(self, path=''):
if self.physSensHistories == None:
            print("Error: this simulation is not saving physical sensitivity histories, reinitialize with flag")
return -1
if path=='':
path = './physSensHistories.sens'
pickle.dump(self.physSensHistories,open(path,'wb'))
return 0
def load_physSensHistories(self, path=''):
if self.physSensHistories == None:
            print("Error: this simulation is not saving physical sensitivity histories, reinitialize with flag")
return -1
if path=='':
path = './physSensHistories.sens'
        self.physSensHistories = pickle.load(open(path, 'rb'))
return 0
#note this is destructive, the original timeHistories are overwritten, run before any runs
#same write is destructive by default
def load_time_histories(self, path=''):
if self.timeHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
if path=='':
path = './time_histories.time'
        self.timeHistories = pickle.load(open(path, 'rb'))
return 0
def settingShockTubeConditions(self):
'''
Determine the mechanical and thermal boundary conditions for a
shock tube based on what was initialized.
'''
#assigning the thermal boundary variable
if re.match('[aA]diabatic',self.thermalBoundary):
energy = 'on'
elif re.match('[iI]sothermal',self.thermalBoundary):
energy = 'off'
else:
raise Exception('Please specify a thermal boundary condition, adiabatic or isothermal')
        #assigning the mechanical boundary variable
if re.match('[Cc]onstant [Pp]ressure',self.mechanicalBoundary):
mechBoundary = 'constant pressure'
elif re.match('[Cc]onstant [Vv]olume',self.mechanicalBoundary):
mechBoundary = 'constant volume'
else:
            raise Exception('Please specify a mechanical boundary condition, constant pressure or constant volume')
#return the thermal and mechanical boundary of the shock tube
return energy,mechBoundary
def sensitivity_adjustment(self,temp_del:float=0.0,
pres_del:float=0.0,
spec_pair:(str,float)=('',0.0)):
'''
Appends the sensitivity adjustment to list, and calls sensitivity adjustment
function from the simulations class, to adjust P,T,X for the sensitivity
calculation
Parameters
----------
temp_del : float, optional
The decimal value of the percent by which temperature is perturbed.
The default is 0.0.
pres_del : float, optional
The decimal value of the percent by which pressure is perturbed.
The default is 0.0.
spec_pair : (str,float), optional
The string of a species and the decimal value of the percent by
which that species is perturbed . The default is ('',0.0).
Returns
-------
data : Pandas Data Frame
Time history of the perturbed simulation.
'''
if temp_del != 0.0:
self.dk.append(temp_del)
if pres_del != 0.0:
self.dk.append(pres_del)
if spec_pair[1] != 0.0:
self.dk.append(spec_pair[1])
kin_temp = self.kineticSens
self.kineticSens = 0
data = sim.Simulation.sensitivity_adjustment(self,temp_del,pres_del,spec_pair)
self.kineticSens = kin_temp
return data
def run(self,initialTime:float=-1.0, finalTime:float=-1.0):
'''
Run the shock tube simulation
Parameters
----------
initialTime : float, optional
The time at which the reactor simulation begins, in seconds. The default is -1.0.
finalTime : float, optional
The time at which the reactor simulation ends, in seconds. The default is -1.0.
Returns
-------
timeHistory: Pandas DataFrame
            Time history of simulation containing temperature, pressure, and
species results.
kineticSensitivities: numpy array
three dimensional numpy array: (time x reaction x observable).
'''
if initialTime == -1.0:
initialTime = self.initialTime
if finalTime == -1.0:
finalTime = self.finalTime
self.timeHistory = None
self.kineticSensitivities= None #3D numpy array, columns are reactions with timehistories, depth gives the observable for those histories
conditions = self.settingShockTubeConditions()
mechanicalBoundary = conditions[1]
#same solution for both cp and cv sims
if mechanicalBoundary == 'constant pressure':
shockTube = ct.IdealGasConstPressureReactor(self.processor.solution,
name = 'R1',
energy = conditions[0])
else:
shockTube = ct.IdealGasReactor(self.processor.solution,
name = 'R1',
energy = conditions[0])
sim = ct.ReactorNet([shockTube])
sim.rtol=self.rtol
sim.atol=self.atol
#print(sim.rtol_sensitivity,sim.atol_sensitivity)
sim.rtol_sensitivity=self.rtol_sensitivity
sim.atol_sensitivity=self.atol_sensitivity
columnNames = [shockTube.component_name(item) for item in range(shockTube.n_vars)]
columnNames = ['time']+['pressure']+columnNames
self.timeHistory = pd.DataFrame(columns=columnNames)
if self.kineticSens == 1:
for i in range(self.processor.solution.n_reactions):
shockTube.add_sensitivity_reaction(i)
dfs = [pd.DataFrame() for x in range(len(self.observables))]
tempArray = [np.zeros(self.processor.solution.n_reactions) for x in range(len(self.observables))]
t = self.initialTime
counter = 0
#print(sim.rtol_sensitivity,sim.atol_sensitivity)
while t < self.finalTime:
t = sim.step()
if mechanicalBoundary =='constant volume':
state = np.hstack([t,shockTube.thermo.P,shockTube.mass,shockTube.volume,
shockTube.T, shockTube.thermo.X])
else:
state = np.hstack([t,shockTube.thermo.P, shockTube.mass,
shockTube.T, shockTube.thermo.X])
self.timeHistory.loc[counter] = state
if self.kineticSens == 1:
counter_1 = 0
for observable,reaction in itertools.product(self.observables, range(self.processor.solution.n_reactions)):
tempArray[self.observables.index(observable)][reaction] = sim.sensitivity(observable,
reaction)
counter_1 +=1
if counter_1 % self.processor.solution.n_reactions == 0:
dfs[self.observables.index(observable)] = dfs[self.observables.index(observable)].append(((
pd.DataFrame(tempArray[self.observables.index(observable)])).transpose()),
ignore_index=True)
counter+=1
if self.timeHistories != None:
self.timeHistory.time = self.timeHistory.time + self.time_shift_value
#self.timeHistory.time = self.timeHistory.time + 0
self.timeHistories.append(self.timeHistory)
############################################################
if self.kineticSens == 1:
numpyMatrixsksens = [dfs[dataframe].values for dataframe in range(len(dfs))]
self.kineticSensitivities = np.dstack(numpyMatrixsksens)
return self.timeHistory,self.kineticSensitivities
else:
return self.timeHistory
#interpolate the most recent time history against the oldest by default
#working_data used if have list not pandas frame
#return more data about what was interpolated in a tuple?
def interpolate_time(self,index:int=None,time_history=None):
'''
This function interpolates and returns the most recent time history
against the original time history by default, unless a specific time
history index or time history is passed in. If an index is passed in
then an interpolated time history associated with that index in the
list is returned. If a specific time_history is passed in then an
interpolated version of that time history is returned.
Parameters
----------
index : int, optional
The index value of the specific time history to be interpolated.
The default is None.
time_history : Pandas Data Frame, optional
Pandas dataframe containing the time history from a simulation.
The default is None.
Returns
-------
interpolatedTimeHistory: Pandas Data Frame
Interpolated time history.
'''
if self.timeHistories == None:
print("Error: this simulation is not saving time histories, reinitialize with flag")
return -1
else:
if index is not None and time_history is not None:
print("Error: can only specify one of index, time_history")
return -1
if index is None:
index = -1
if time_history is None:
return self.interpolation(self.timeHistories[0],self.timeHistories[index],["temperature","pressure"]+self.observables)
else:
return self.interpolation(self.timeHistories[0],time_history,["temperature","pressure"]+self.observables)
#assumes most recent time histories are the correct ones to interpolate on
#interpolates agains the original time history
def interpolate_species_adjustment(self):
'''
This function interpolates the time history of a species adjustment run,
against the original time history.
'''
interpolated_data = []
species_to_loop = set(self.conditions.keys()).difference(['Ar','AR','HE','He','Kr','KR','Xe','XE','NE','Ne'])
for x in range(0,len(species_to_loop)):
interpolated_data.insert(0,self.interpolate_time(index=-1-x))
return interpolated_data
def interpolate_species_sensitivities(self):
'''
This function interpolates the time history of a species sensitivity
against the original time history.
'''
interpolated_data = self.interpolate_species_adjustment()
interpolated_sens = []
ind_off = len(self.timeHistories)-len(set(self.conditions.keys()).difference(['Ar','AR','HE','He','Kr','KR','Xe','XE','NE','Ne']))
for i,th in enumerate(interpolated_data):
ind = ind_off + i
interpolated_sens.append(self.interpolate_physical_sensitivities(index=ind,time_history=th))
return interpolated_sens
#interpolate a range of time histories agains the original
#possibly add experimental flag to do range with exp data
#end is exclusive
#start here tomorrow
def interpolate_range(self,begin:int,end:int):
'''
Function that defines a time range for interpolation.
Parameters
----------
        begin : int
            Index of the first saved time history to interpolate.
        end : int
            Index at which interpolation stops (exclusive).
Returns
-------
interpolated_data : Pandas Data Frame
Interpolated time history.
'''
if begin<0 or end>len(self.timeHistories):
print("Error: invalid indices")
if self.timeHistories == None:
print("Error: simulation is not saving time histories")
interpolated_data = []
for x in range(begin,end):
interpolated_data.append(self.interpolate_time(index=x))
return interpolated_data
#interpolates agains the original time history
def interpolate_physical_sensitivities(self, index:int=-1,
time_history=None):
'''
This function interpolates the time history of a physical sensitivity,
excluding species, against the original time history and returns a
numpy array.
Parameters
----------
index : int, optional
The index value of the specific time history to be interpolated.
The default is -1.
time_history : Pandas Data Frame, optional
Pandas dataframe containing the time history from a simulation.
The default is None.
Returns
-------
sensitivity: Pandas Data Frame.
Physical sensitivity Data Frame.
'''
interpolated_time = self.interpolate_time(index) if time_history is None else time_history
#print(interpolated_time)
#calculate which dk
dk = self.dk[index]
sensitivity = self.sensitivityCalculation(self.timeHistories[0][self.observables],
interpolated_time[self.observables],self.observables,dk)
if self.physSensHistories != None:
self.physSensHistories.append(sensitivity)
# print('this is sensitivity')
# print(sensitivity)
return sensitivity
    #returns a 3D array of interpolated time histories corresponding to kinetic sensitivities
def interpolate_experimental_kinetic(self, pre_interpolated = []):
'''
This function interpolates kinetic sensitivities to experimental data.
Parameters
----------
pre_interpolated : list, optional
            List of kinetic sensitivities to be interpolated. If not provided
the function will look for kinetic sensitivities that were saved
as an attribute.
The default is [].
Returns
-------
flipped: numpy array
Interpolated 3d array containing sensitivities.
'''
if self.experimentalData == None:
print("Error: experimental data must be loaded")
return -1
if len(pre_interpolated) == 0 and not self.kineticSensitivities.any():
print("Error: must specify pre_interpolated or have kineticSensitivities run first")
return -1
elif len(pre_interpolated)>0:
array = pre_interpolated
else:
array = self.kineticSensitivities
exp_interp_array = []
#if len(self.experimentalData) < array.shape[2]:
# print("Error: mismatch between kineticSensitivities observables and given experimental data")
# return -1
#exp data and kineticSensitivities must match in size and order of observables
for i,frame in enumerate(self.experimentalData):
if i > array.shape[2]:
break
sheet = array[:,:,i]
exp_interp_array.append([])
for time_history in sheet.T:
# new_history = np.interp(frame.ix[:,0],
# self.timeHistories[0]['time'],
# time_history)
new_history = np.interp(frame.iloc[:,0],
self.timeHistories[0]['time'],
time_history)
new_history = new_history.reshape((new_history.shape[0],
1))
exp_interp_array[-1].append(new_history)
flipped = []
for x in exp_interp_array:
flipped.append(np.hstack(x))
return flipped
def map_and_interp_ksens(self,time_history=None):
'''
        This function maps kinetic sensitivity calculations returned from cantera
        to the kinetic parameters A, n and Ea, as well as interpolates them to the
        corresponding experimental data. It returns a dictionary containing the
        interpolated kinetic sensitivities.
Parameters
----------
time_history : Pandas Data Frame, optional
The original time history is required for obtaining temperature
values in order to do the mapping. The default is None.
Returns
-------
dict
Dictionary containing the mapping for kinetic sensitivities.
'''
A = self.kineticSensitivities
N = np.zeros(A.shape)
Ea = np.zeros(A.shape)
for i in range(0,A.shape[2]):
sheetA = A[:,:,i] #sheet for specific observable
for x,column in enumerate(sheetA.T):
N[:,x,i]= np.multiply(column,np.log(self.timeHistories[0]['temperature'])) if time_history is None else np.multiply(column,np.log(time_history['temperature']))
#not sure if this mapping is correct, check with burke and also update absorption mapping
#to_mult_ea = np.divide(-1,np.multiply(1/ct.gas_constant,self.timeHistories[0]['temperature'])) if time_history is None else np.divide(-1,np.multiply(ct.gas_constant,time_history['temperature']))
to_mult_ea = np.divide(-1,np.multiply(1,self.timeHistories[0]['temperature'])) if time_history is None else np.divide(-1,np.multiply(1,time_history['temperature']))
Ea[:,x,i]= np.multiply(column,to_mult_ea)
return {'A':self.interpolate_experimental_kinetic(A),
'N':self.interpolate_experimental_kinetic(N),
'Ea':self.interpolate_experimental_kinetic(Ea)}
#assumes pre_interpolated has been interpolated against the original time history
#assumes pre_interpolated is a list of dataframes where each dataframe is a time history
#single is a single dataframe representing one time history/run of the simulation
def interpolate_experimental(self,pre_interpolated = [], single = None):
'''
This function interpolates the time history of a physical sensitivity
against the corresponding experimental data and returns a pandas
dataframe.
Parameters
----------
pre_interpolated : list, optional
has been interpolated against the original time history,
assumes pre_interpolated is a list of dataframes where each dataframe
is a time history. The default is [].
single : Pandas Data Frame, optional
Single Pandas Data Frame to be interpolated against experimental
data. The default is None.
Returns
-------
int_exp: Pandas Data Frame
Interpolated data frame.
'''
if self.timeHistories == None:
print("Error: can't interpolate without time histories")
return -1
if self.experimentalData == None:
print("Error: must have experimental data before interpolation")
return -1
if len(pre_interpolated)!=0 and single != None:
print("Error: can only specify one of pre_interpolated, single")
if single is not None:
pre_interpolated = [single]
int_exp = []
#This portion of the function removes the pressure and the temperature from the pre_interpolated frames
#so that when it gets interpolated against experimental data the temperature and pressure are not there
if isinstance(pre_interpolated[0],pd.DataFrame):
if 'pressure' in pre_interpolated[0].columns.tolist() and 'temperature' in pre_interpolated[0].columns.tolist():
pre_interpolated = [df.drop(columns=['temperature','pressure']) for df in pre_interpolated]
#make sure you put the observables list in the correct order
#check what order the experimental list is parsed in
#making new observables list
if single is not None:
mole_fraction_and_concentration_observables= self.moleFractionObservables + self.concentrationObservables
mole_fraction_and_concentration_observables = [x for x in mole_fraction_and_concentration_observables if x is not None]
for time_history in pre_interpolated:
array_list = []
max_size = 0
for i, observable in enumerate(mole_fraction_and_concentration_observables):
interpolated_column = np.interp(self.experimentalData[i]['Time'].values,
self.timeHistories[0]['time'].values,
time_history[observable].values)
interpolated_column = np.reshape(interpolated_column,((interpolated_column.shape[0],1)))
array_list.append(interpolated_column)
max_size = max(interpolated_column.shape[0],max_size)
padded_arrays = []
for arr in array_list:
if arr.shape[0] < max_size:
padded_arrays.append(np.pad(arr,
((0,max_size-arr.shape[0]),(0,0)),
'constant',constant_values = np.nan))
else:
padded_arrays.append(arr)
np_array = np.hstack((padded_arrays))
new_frame = pd.DataFrame(np_array)
int_exp.append(new_frame)
for x in int_exp:
x.columns = mole_fraction_and_concentration_observables
return int_exp[0]
#check and make sure this part actually works for what we want for interpolating the pre interpolated time histories
#make sure we are getting the correct columns
else:
for time_history in pre_interpolated:
array_list = []
max_size = 0
for i,frame in enumerate(self.experimentalData): #each frame is data for one observable
if i>len(self.observables):
break
#change these bboth to use observable
# interpolated_column= np.interp(frame.ix[:,0],
# self.timeHistories[0]['time'],
# time_history.ix[:,i])
interpolated_column= np.interp(frame.iloc[:,0],
self.timeHistories[0]['time'],
time_history.iloc[:,i])
interpolated_column= np.reshape(interpolated_column,
((interpolated_column.shape[0],1)))
array_list.append(interpolated_column)
max_size = max(interpolated_column.shape[0],max_size)
padded_arrays= []
for arr in array_list:
if arr.shape[0] < max_size:
padded_arrays.append(np.pad(arr,
((0,max_size - arr.shape[0]),(0,0)),
'constant',constant_values=np.nan))
else:
padded_arrays.append(arr)
np_array = np.hstack((padded_arrays))
new_frame = pd.DataFrame(np_array)
int_exp.append(new_frame)
for x in int_exp:
x.columns = self.observables[0:len(self.experimentalData)]
if single is not None:
return int_exp[0]
else:
return int_exp
def interpolation(self,originalValues,newValues, thingBeingInterpolated):
'''
This function is the base interpolation function, interpolating one set
of data to another on a per time basis and returns a pandas dataframe.
Parameters
----------
originalValues : Pandas Data Frame
Original dataframe of time history.
newValues : Pandas Data Frame
New dataframe of time history.
thingBeingInterpolated : list
List of observable names.
Returns
-------
interpolatedData: Pandas Data Frame
Interpolated Data Frame.
'''
#interpolating time histories to original time history
if isinstance(originalValues,pd.DataFrame) and isinstance(newValues,pd.DataFrame):
tempDfForInterpolation = newValues[thingBeingInterpolated]
#tempListForInterpolation = [tempDfForInterpolation.ix[:,x].values for x in range(tempDfForInterpolation.shape[1])]
tempListForInterpolation = [tempDfForInterpolation.iloc[:,x].values for x in range(tempDfForInterpolation.shape[1])]
interpolatedData = [np.interp(originalValues['time'].values,newValues['time'].values,tempListForInterpolation[x]) for x in range(len(tempListForInterpolation))]
interpolatedData = [pd.DataFrame(interpolatedData[x]) for x in range(len(interpolatedData))]
interpolatedData = pd.concat(interpolatedData, axis=1,ignore_index=True)
interpolatedData.columns = thingBeingInterpolated
else:
print("Error: values must be pandas dataframes")
return -1
return interpolatedData
def sensitivityCalculation(self,originalValues,newValues,thingToFindSensitivtyOf,dk=.01):
'''
This function calculates log/log sensitivity and returns a
pandas dataframe.
Parameters
----------
originalValues : Pandas Data Frame
Original dataframe of time history.
newValues : Pandas Data Frame
New dataframe of time history.
thingToFindSensitivtyOf : list
List of observable names.
dk : float, optional
The decimal value of the percentage by which the original value
was perturbed. The default is .01.
Returns
-------
sensitivity: Pandas Data Frame
Sensitivity of observables.
'''
if isinstance(originalValues,pd.DataFrame) and isinstance(newValues,pd.DataFrame):
newValues.columns = thingToFindSensitivtyOf
newValues = newValues.applymap(np.log)
originalValues = originalValues.applymap(np.log)
#tab
sensitivity = (newValues.subtract(originalValues)/dk)
return sensitivity
else:
print("Error: wrong datatype, both must be pandas data frames")
return -1
def importExperimentalData(self,csvFileList):
'''
This function imports experimental data in csv format and returns a
list of pandas dataframes.
Parameters
----------
csvFileList : list
List of csv file directories.
Returns
-------
experimentalData : list
Experimental data from csv files as pandas data frames
stored in a list.
'''
print('Importing shock tube data the following csv files...')
print(csvFileList)
experimentalData = [pd.read_csv(csv) for csv in csvFileList]
experimentalData = [experimentalData[x].dropna(how='any') for x in range(len(experimentalData))]
experimentalData = [experimentalData[x].apply(pd.to_numeric, errors = 'coerce').dropna() for x in range(len(experimentalData))]
for x in range(len(experimentalData)):
experimentalData[x] = experimentalData[x][~(experimentalData[x][experimentalData[x].columns[1]] < 0)]
self.experimentalData = experimentalData
return experimentalData
def savingInterpTimeHistoryAgainstExp(self,timeHistory):
'''
        This function writes the time history which is interpolated against
        experimental data as a class object.
Parameters
----------
timeHistory : Pandas Data Frame
            Time history interpolated against experimental data.
Returns
-------
timeHistoryInterpToExperiment: Pandas Data Frame
Time history that is being saved.
'''
self.timeHistoryInterpToExperiment = timeHistory
def interpolatePressureandTempToExperiment(self,simulation,experimental_data):
'''
This function interpolates the pressure and temperature time history
from a simulation against the corresponding experimental data.
Parameters
----------
simulation : class variable
            The simulation associated with the time history to be
interpolated.
experimental_data : list
            List of corresponding experimental data dataframes.
Returns
-------
list_of_df : list
List of interpolated data frames for pressure and temperature.
'''
p_and_t = ['pressure','temperature']
list_of_df = []
for df in experimental_data:
temp = []
for variable in p_and_t:
interpolated_data = np.interp(df['Time'],simulation.timeHistories[0]['time'],simulation.timeHistories[0][variable])
interpolated_data = interpolated_data.reshape((interpolated_data.shape[0],1))
temp.append(interpolated_data)
temp = np.hstack(temp)
temp = pd.DataFrame(temp)
temp.columns = p_and_t
list_of_df.append(temp)
self.pressureAndTemperatureToExperiment = list_of_df
return list_of_df
def calculate_time_shift_sensitivity(self,simulation,experimental_data,dk=1e-8):
'''
        This function calculates the sensitivity of the simulated observables
        to a small shift of the time axis.
Parameters
----------
simulation : class variable
            The simulation associated with the time history to
            be perturbed.
experimental_data : list
            List of corresponding experimental data dataframes.
dk : float, optional
Decimal percentage by which time values are perturbed. Default is
1e-8.
Returns
-------
time_shift_sensitivity : Pandas Data Frame
            Pandas Data Frame containing the time shift sensitivity
            values for each observable.
'''
lst_obs = simulation.moleFractionObservables + simulation.concentrationObservables
lst_obs = [i for i in lst_obs if i]
one_percent_of_average = dk
original_time = simulation.timeHistories[0]['time']
new_time = original_time + one_percent_of_average
        #interpolate to the original time
interpolated_against_original_time = []
for i,obs in enumerate(lst_obs):
interpolated_original_observable_against_original_time = np.interp(original_time,new_time,simulation.timeHistories[0][lst_obs[i]])
s1 = pd.Series(interpolated_original_observable_against_original_time,name=lst_obs[i])
interpolated_against_original_time.append(s1)
observables_interpolated_against_original_time_df = pd.concat(interpolated_against_original_time,axis=1)
#calculate sensitivity
calculated_sensitivity = []
for i,obs in enumerate(lst_obs):
sens = (observables_interpolated_against_original_time_df[obs].apply(np.log) - simulation.timeHistories[0][obs].apply(np.log))/one_percent_of_average
s1 = pd.Series(sens,name=lst_obs[i])
calculated_sensitivity.append(s1)
        calculated_sensitivity_df = pd.concat(calculated_sensitivity,axis=1)
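# Illustrative-only sketch of the log/log sensitivity pattern used by
# sensitivityCalculation and calculate_time_shift_sensitivity above:
# S = (ln(perturbed) - ln(nominal)) / dk, computed element-wise on the interpolated
# time histories. The species names and values here are invented.
import numpy as np
import pandas as pd
nominal = pd.DataFrame({'OH': [1.0e-6, 2.0e-6], 'H2O': [3.0e-4, 3.1e-4]})
perturbed = pd.DataFrame({'OH': [1.1e-6, 2.1e-6], 'H2O': [3.2e-4, 3.3e-4]})
dk = 0.01  # the physical parameter was perturbed by 1 %
sensitivity = (perturbed.applymap(np.log) - nominal.applymap(np.log)) / dk
print(sensitivity)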
import pandas as pd
from functools import reduce
from sklearn.decomposition import PCA
from sklearn import preprocessing
import numpy as np
import random as rd
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import sys
def determine_number_of_components(df):
"""Determine number of components based on the samples"""
samples = list(df.shape)[1]
if samples == 1:
n_components = 1
elif samples == 2:
n_components = 2
else:
n_components = 3
return n_components
def perform_pca(df, n_components):
"""Run PCA analysis"""
df = df.astype(float)
scaled_data = preprocessing.scale(df.T, with_std=False)
pca = PCA(n_components=n_components)
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
per_var = np.round(pca.explained_variance_ratio_* 100, decimals=1)
labels = ['PC' + str(x) for x in range(1, len(per_var)+1)]
return scaled_data, pca_data, per_var, labels
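# A hedged usage sketch (random data, so the printed percentages will vary) of the
# perform_pca recipe above: centre the samples, fit PCA, and report the percent
# variance captured by each component.
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA
demo_counts = np.random.RandomState(0).rand(200, 4)               # 200 features x 4 samples
demo_scaled = preprocessing.scale(demo_counts.T, with_std=False)  # one row per sample
demo_pca = PCA(n_components=3).fit(demo_scaled)
print(np.round(demo_pca.explained_variance_ratio_ * 100, decimals=1))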
def generate_scree_plot(out, per_var, labels):
"""Generate scree plot"""
plt.figure()
plt.rcParams["figure.figsize"] = (20,20)
plt.bar(x=range(1,len(per_var)+1), height=per_var, tick_label=labels)
plt.ylabel('Percentage of Explained Variance')
plt.xlabel('Principal Component')
plt.title('Scree Plot')
plt.xticks(rotation='vertical')
plt.savefig(os.path.join(out, "scree_plot.pdf"))
plt.savefig(os.path.join(out, "scree_plot.png"))
plt.close()
def generate_pc1_pc2_plot(out, pca_df, per_var):
"""Scatterplot PC1 and PC2"""
plt.figure()
plt.rcParams["figure.figsize"] = (20,20)
plt.scatter(pca_df.PC1, pca_df.PC2)
plt.xlabel('PC1 - {0}%'.format(per_var[0]), fontsize=22)
plt.ylabel('PC2 - {0}%'.format(per_var[1]), fontsize=22)
plt.title('PCA components 1 & 2', fontsize=22)
for sample in pca_df.index:
plt.annotate(sample, (pca_df.PC1.loc[sample], pca_df.PC2.loc[sample]))
plt.savefig(os.path.join(out, "PCA_1_2.pdf"))
plt.savefig(os.path.join(out, "PCA_1_2.png"))
plt.close()
def generate_pc2_pc3_plot(out, pca_df, per_var):
    """Scatterplot PC2 and PC3"""
plt.figure()
plt.rcParams["figure.figsize"] = (20,20)
plt.scatter(pca_df.PC2, pca_df.PC3)
plt.xlabel('PC2 - {0}%'.format(per_var[1]), fontsize=22)
plt.ylabel('PC3 - {0}%'.format(per_var[2]), fontsize=22)
plt.title('PCA components 2 & 3', fontsize=22)
for sample in pca_df.index:
plt.annotate(sample, (pca_df.PC2.loc[sample], pca_df.PC3.loc[sample]))
plt.savefig(os.path.join(out, "PCA_2_3.pdf"))
plt.savefig(os.path.join(out, "PCA_2_3.png"))
plt.close()
def generate_pc1_pc3_plot(out, pca_df, per_var):
"""Scatterplot PC1 and PC3"""
plt.figure()
plt.rcParams["figure.figsize"] = (20,20)
plt.scatter(pca_df.PC1, pca_df.PC3)
plt.xlabel('PC1 - {0}%'.format(per_var[0]), fontsize=22)
plt.ylabel('PC3 - {0}%'.format(per_var[2]), fontsize=22)
plt.title('PCA components 1 & 3', fontsize=22)
for sample in pca_df.index:
plt.annotate(sample, (pca_df.PC1.loc[sample], pca_df.PC3.loc[sample]))
plt.savefig(os.path.join(out, "PCA_1_3.pdf"))
plt.savefig(os.path.join(out, "PCA_1_3.png"))
plt.close()
def generate_pca_3d(out, pca_df, per_var):
"""Scatterplot PC1, PC2, PC3"""
labels = []
pc1 = []
pc2 = []
pc3 = []
for i, row in pca_df.iterrows():
labels.append(i)
pc1.append(row["PC1"])
pc2.append(row["PC2"])
pc3.append(row["PC3"])
fig = plt.figure(figsize=(20,20))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(pc1, pc2, pc3)
ax.set_xlabel('PC1 - {0}%'.format(per_var[0]))
ax.set_ylabel('PC2 - {0}%'.format(per_var[1]))
ax.set_zlabel('PC3 - {0}%'.format(per_var[2]))
for label, x, y, z in zip(labels, pc1, pc2, pc3):
ax.text(x, y, z, label)
plt.savefig(os.path.join(out, "PCA_3D.pdf"))
plt.savefig(os.path.join(out, "PCA_3D.png"))
def counts2tpm(counts_df, lengths_df):
"""Convert a table of raw counts to TPM
Input-1: pandas dataframe of counts (the id should be the index)
Input-2: pandas dataframe of lengths (the id should be the index)
Output: pandas dataframe of TPMs
"""
df = pd.merge(lengths_df, counts_df, left_index=True, right_index=True)
counts = df.iloc[:,1:]
rate = (counts).div(df.iloc[:,0], axis=0)
denom = rate.sum()
tpm = rate/denom*1e6
return tpm
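# Hypothetical worked example (invented counts and lengths) of the counts -> TPM
# conversion above: rate = counts / length, then each sample is scaled so its rates
# sum to one million. Here geneA gives 100/1000 = 0.1 and geneB 300/2000 = 0.15,
# so the TPMs come out as 400000 and 600000.
import pandas as pd
demo_counts = pd.DataFrame({"sample1": [100, 300]}, index=["geneA", "geneB"])
demo_lengths = pd.DataFrame({"length": [1000, 2000]}, index=["geneA", "geneB"])
demo_rate = demo_counts.div(demo_lengths["length"], axis=0)
demo_tpm = demo_rate / demo_rate.sum() * 1e6
print(demo_tpm)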
def counts2tpm_many(counts_df, lengths_df):
"""Convert a table of raw counts to TPM
Input-1: pandas dataframe of counts (the id should be the index)
Input-2: pandas dataframe of lengths (the id should be the index)
Each sample has a different length column
Output: pandas dataframe of TPMs
"""
columns = counts_df.columns
tpms = []
for column in columns:
temp_counts = pd.DataFrame(counts_df[column])
        temp_length = pd.DataFrame(lengths_df[column])
# -*- coding: utf-8 -*-
from datetime import datetime
import pandas as pd
# from numba import njit
from zvt import zvt_config
from zvt.api.data_type import Region, Provider, EntityType
from zvt.domain import BlockMoneyFlow, BlockCategory, Block
from zvt.contract import IntervalLevel
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.networking.request import sync_get
from zvt.utils.time_utils import to_pd_timestamp
from zvt.utils.utils import to_float
# real-time money flow
# 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_bk?page=1&num=20&sort=netamount&asc=0&fenlei=1'
# 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_bk?page=1&num=20&sort=netamount&asc=0&fenlei=0'
class SinaBlockMoneyFlowRecorder(FixedCycleDataRecorder):
    # where the recorded information comes from
region = Region.CHN
provider = Provider.Sina
    # schema of the entity
entity_schema = Block
    # schema of the recorded data
data_schema = BlockMoneyFlow
url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_bkzj_zjlrqs?page=1&num={}&sort=opendate&asc=0&bankuai={}%2F{}'
def __init__(self, exchanges=None, entity_ids=None, codes=None, batch_size=10,
force_update=True, sleeping_time=10, default_size=zvt_config['batch_size'],
real_time=False, fix_duplicate_way='ignore', start_timestamp=None,
end_timestamp=None, close_hour=0, close_minute=0,
level=IntervalLevel.LEVEL_1DAY, kdata_use_begin_time=False,
one_day_trading_minutes=24 * 60) -> None:
super().__init__(EntityType.Block, exchanges, entity_ids, codes, batch_size,
force_update, sleeping_time, default_size, real_time,
fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, level, kdata_use_begin_time, one_day_trading_minutes)
def generate_url(self, category, code, number):
if category == BlockCategory.industry.value:
block = 0
elif category == BlockCategory.concept.value:
block = 1
return self.url.format(number, block, code)
def record(self, entity, start, end, size, timestamps, http_session):
url = self.generate_url(category=entity.category, code=entity.code, number=size)
text = sync_get(http_session, url, return_type='text')
if text is None:
return None
json_list = eval(text)
if len(json_list) == 0:
return None
# @njit(nopython=True)
def numba_boost_up(json_list):
result_list = []
for item in json_list:
result_list.append({
'name': entity.name,
'timestamp': to_pd_timestamp(item['opendate']),
'close': to_float(item['avg_price']),
'change_pct': to_float(item['avg_changeratio']),
'turnover_rate': to_float(item['turnover']) / 10000,
'net_inflows': to_float(item['netamount']),
'net_inflow_rate': to_float(item['ratioamount']),
'net_main_inflows': to_float(item['r0_net']),
'net_main_inflow_rate': to_float(item['r0_ratio'])
})
return result_list
result_list = numba_boost_up(json_list)
if len(result_list) > 0:
            df = pd.DataFrame.from_records(result_list)
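# Illustrative-only sketch (fabricated values, plain pandas in place of the zvt
# helpers to_pd_timestamp/to_float) of the record-normalisation pattern above:
# parse each raw item into a typed dict, then build the frame in one call with
# DataFrame.from_records.
import pandas as pd
raw_items = [
    {"opendate": "2024-01-02", "avg_price": "10.5", "netamount": "1200000"},
    {"opendate": "2024-01-03", "avg_price": "10.8", "netamount": "-340000"},
]
records = [
    {"timestamp": pd.Timestamp(item["opendate"]),
     "close": float(item["avg_price"]),
     "net_inflows": float(item["netamount"])}
    for item in raw_items
]
print(pd.DataFrame.from_records(records).dtypes)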
import json
import networkx as nx
import numpy as np
import os
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from config import logger, config
def read_profile_data():
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_df = pd.read_csv(config.profile_file)
profile_na.columns = profile_df.columns
profile_df = profile_df.append(profile_na)
return profile_df
def merge_raw_data():
tr_queries = pd.read_csv(config.train_query_file, parse_dates=['req_time'])
te_queries = pd.read_csv(config.test_query_file, parse_dates=['req_time'])
tr_plans = pd.read_csv(config.train_plan_file, parse_dates=['plan_time'])
te_plans = pd.read_csv(config.test_plan_file, parse_dates=['plan_time'])
tr_click = pd.read_csv(config.train_click_file)
trn = tr_queries.merge(tr_click, on='sid', how='left')
trn = trn.merge(tr_plans, on='sid', how='left')
trn = trn.drop(['click_time'], axis=1)
trn['click_mode'] = trn['click_mode'].fillna(0)
tst = te_queries.merge(te_plans, on='sid', how='left')
tst['click_mode'] = -1
df = pd.concat([trn, tst], axis=0, sort=False)
df = df.drop(['plan_time'], axis=1)
df = df.reset_index(drop=True)
df['weekday'] = df['req_time'].dt.weekday
df['day'] = df['req_time'].dt.day
df['hour'] = df['req_time'].dt.hour
df = df.drop(['req_time'], axis=1)
logger.info('total data size: {}'.format(df.shape))
logger.info('data columns: {}'.format(', '.join(df.columns)))
return df
def extract_plans(df):
plans = []
for sid, plan in tqdm(zip(df['sid'].values, df['plans'].values)):
try:
p = json.loads(plan)
for x in p:
x['sid'] = sid
plans.extend(p)
except:
pass
return pd.DataFrame(plans)
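# Tiny illustration (made-up plan JSON) of what extract_plans produces: every
# transport plan inside a query's JSON list becomes its own row, tagged with the
# sid it came from.
import json
import pandas as pd
demo = pd.DataFrame({
    "sid": [1],
    "plans": ['[{"transport_mode": 2, "distance": 1200, "price": 300, "eta": 900}]'],
})
demo_rows = []
for sid, plan in zip(demo["sid"], demo["plans"]):
    for p in json.loads(plan):
        p["sid"] = sid
        demo_rows.append(p)
print(pd.DataFrame(demo_rows))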
def generate_od_features(df):
feat = df[['o','d']].drop_duplicates()
feat = feat.merge(df.groupby('o')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='o')
feat.rename(columns={'day': 'o_nunique_day',
'hour': 'o_nunique_hour',
'pid': 'o_nunique_pid',
'click_mode': 'o_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby('d')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='d')
feat.rename(columns={'day': 'd_nunique_day',
'hour': 'd_nunique_hour',
'pid': 'd_nunique_pid',
'click_mode': 'd_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby(['o', 'd'])[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on=['o', 'd'])
feat.rename(columns={'day': 'od_nunique_day',
'hour': 'od_nunique_hour',
'pid': 'od_nunique_pid',
'click_mode': 'od_nunique_click'}, inplace=True)
return feat
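# Compact illustration (synthetic queries) of the nunique aggregations above: for
# each origin we count how many distinct days, hours, users and click modes it
# appears with, summarising how busy and diverse that location is.
import pandas as pd
demo_q = pd.DataFrame({
    "o": ["p1", "p1", "p2"],
    "day": [1, 2, 1],
    "hour": [8, 8, 9],
    "pid": [10, 11, 10],
    "click_mode": [2, 2, 7],
})
print(demo_q.groupby("o")[["day", "hour", "pid", "click_mode"]].nunique().reset_index())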
def generate_pid_features(df):
feat = df.groupby('pid')[['hour', 'day']].nunique().reset_index()
feat.rename(columns={'hour': 'pid_nunique_hour', 'day': 'pid_nunique_day'}, inplace=True)
feat['nunique_hour_d_nunique_day'] = feat['pid_nunique_hour'] / feat['pid_nunique_day']
feat = feat.merge(df.groupby('pid')[['o', 'd']].nunique().reset_index(), how='left', on='pid')
feat.rename(columns={'o': 'pid_nunique_o', 'd': 'pid_nunique_d'}, inplace=True)
feat['nunique_o_d_nunique_d'] = feat['pid_nunique_o'] / feat['pid_nunique_d']
return feat
def generate_od_cluster_features(df):
G = nx.Graph()
G.add_nodes_from(df['o'].unique().tolist())
G.add_nodes_from(df['d'].unique().tolist())
edges = df[['o','d']].apply(lambda x: (x[0],x[1]), axis=1).tolist()
G.add_edges_from(edges)
cluster = nx.clustering(G)
cluster_df = pd.DataFrame([{'od': key, 'cluster': cluster[key]} for key in cluster.keys()])
return cluster_df
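# Minimal sketch (toy node names) of the graph feature above: origins and
# destinations are nodes, each query adds an edge, and nx.clustering measures how
# interconnected a location's neighbours are (1.0 for every node of the triangle
# below).
import networkx as nx
import pandas as pd
demo_od = pd.DataFrame({"o": ["A", "A", "B"], "d": ["B", "C", "C"]})
G_demo = nx.Graph()
G_demo.add_edges_from(demo_od.itertuples(index=False, name=None))
print(nx.clustering(G_demo))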
def gen_od_feas(data):
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
data = data.drop(['o', 'd'], axis=1)
return data
def gen_plan_feas(data):
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
logger.info('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
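# Small sketch (invented mode sequences) of the "mode text" trick used above: the
# ordered transport modes of each plan list are treated as words, vectorised with
# TF-IDF over unigrams and bigrams, then compressed with TruncatedSVD.
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
demo_mode_texts = ["word_1 word_3 word_7", "word_2 word_1", "word_null"]
demo_tfidf = TfidfVectorizer(ngram_range=(1, 2)).fit_transform(demo_mode_texts)
demo_svd = TruncatedSVD(n_components=2, random_state=2019)
print(pd.DataFrame(demo_svd.fit_transform(demo_tfidf),
                   columns=["svd_mode_0", "svd_mode_1"]))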
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
if row['hour'] > 7 and row['hour'] < 18: # 7:00 - 18:00
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
else: # 6:00 - 7:00
h = 2
return str(w) + '_' + str(h)
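# Hypothetical usage sketch for group_weekday_and_hour (it assumes the function
# defined just above is in scope): applied row-wise it yields a combined
# "weekday_hourbucket" label. With the rules above, (weekday=0, hour=8) -> '0_8',
# (2, 22) -> '2_0' and (5, 6) -> '5_2'.
import pandas as pd
demo_sample = pd.DataFrame({"weekday": [0, 2, 5], "hour": [8, 22, 6]})
demo_sample["wh_group"] = demo_sample.apply(group_weekday_and_hour, axis=1)
print(demo_sample)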
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv(config.pid_feature_file)
data = data.merge(feat, how='left', on='pid')
return data
def gen_od_feat(data):
feat = pd.read_csv(config.od_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
logger.info('sid shape={}'.format(sid.shape))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
logger.info('feature shape={}'.format(feat.shape))
logger.info('feature columns={}'.format(feat.columns))
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv(config.od_cluster_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
data['o1-m-o2'] = np.abs(data['o1'] - data['o2'])
data['d1-m-d2'] = np.abs(data['d1'] - data['d2'])
data['od_area'] = data['o1-m-o2']*data['d1-m-d2']
data['od_ratio'] = data['o1-m-o2']/data['d1-m-d2']
return data
def gen_od_mode_cnt_feat(data):
feat = pd.read_csv(config.od_mode_cnt_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weekday_hour_cnt_feat(data):
feat = pd.read_csv(config.weekday_hour_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','req_time'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','req_time'])
sid = pd.concat((tr_sid, te_sid))
sid['req_time'] = pd.to_datetime(sid['req_time'])
sid['hour'] = sid['req_time'].map(lambda x: x.hour)
sid['weekday'] = sid['req_time'].map(lambda x: x.weekday())
feat = sid.merge(feat, how='left', on=['hour','weekday']).drop(['hour','weekday','req_time'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_plan_agg_feat(data):
#feat = pd.read_csv(config.od_plan_agg_feature_file)
#tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d','req_time'])
#te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d', 'req_time'])
#sid = pd.concat((tr_sid, te_sid))
#sid['req_time'] = pd.to_datetime(sid['req_time'])
#sid['hour'] = sid['req_time'].map(lambda x: x.hour)
#feat = sid.merge(feat, how='left', on=['o','d','hour']).drop(['o','d','hour','req_time'], axis=1)
feat = pd.read_csv(config.od_plan_agg_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_feat(data):
feat = pd.read_csv(config.mode_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_stats_feat(data):
feat = pd.read_csv(config.od_stats_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_daily_plan_feat(data):
feat = pd.read_csv(config.daily_plan_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weather_feat(data):
feat = pd.read_csv(config.weather_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_pid_count_feat(data):
feat = pd.read_csv(config.od_pid_count_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_plan_ratio_feat(data):
feat = pd.read_csv(config.plan_ratio_file)
data = data.merge(feat, how='left', on='sid')
return data
def generate_f1(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f1')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
df = gen_od_feas(df)
df = gen_plan_feas(df)
df = gen_profile_feas(df)
df = gen_ratio_feas(df)
df = gen_fly_dist_feas(df)
df = gen_aggregate_profile_feas(df) # 0.6759966661470926
df = gen_pid_feat(df) # 0.6762996872664375
df = gen_od_feat(df) # without click count: 0.6780576865566392; with click count: 0.6795810670221226
df = gen_od_cluster_feat(df) # 0.6796523605372234
df = gen_od_eq_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def generate_f2(df):
trn_feat_name, tst_feat_name = config.get_feature_name('f2')
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
trn, tst = generate_f1(df)
df = pd.concat((trn, tst))
df = gen_od_mode_cnt_feat(df) # [+] fold #0: 0.6835031183515229
df = gen_weekday_hour_cnt_feat(df)
df = gen_od_plan_agg_feat(df)
df = gen_mode_feat(df)
#df = gen_mode_stats_feat(df)
## df = gen_weather_feat(df)
#df = gen_daily_plan_feat(df)
#df = gen_od_pid_count_feat(df)
## df = gen_plan_ratio_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def generate_f3(df):
    trn_feat_name, tst_feat_name = config.get_feature_name('f3')  # cache files for the f3 feature set
if os.path.exists(trn_feat_name) and os.path.exists(tst_feat_name):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(trn_feat_name)
tst = pd.read_csv(tst_feat_name)
else:
trn, tst = generate_f2(df)
df = pd.concat((trn, tst))
#df = gen_mode_stats_feat(df)
## df = gen_weather_feat(df)
#df = gen_daily_plan_feat(df)
#df = gen_od_pid_count_feat(df)
## df = gen_plan_ratio_feat(df)
trn = df[df['click_mode'] != -1]
tst = df[df['click_mode'] == -1]
return trn, tst
def get_train_test_features():
config.set_feature_name('f1')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f1.')
trn, tst = generate_f1(df)
logger.info('saving the training and test f1 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features2():
config.set_feature_name('f2')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f2.')
trn, tst = generate_f2(df)
logger.info('saving the training and test f2 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features2a():
config.set_feature_name('f2')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f2.')
trn, tst = generate_f2(df)
logger.info('saving the training and test f2 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/build/feature/od_coord_feature.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_time.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_min.csv')
trn = trn.merge(feat, how='left', on='sid')
tst = tst.merge(feat, how='left', on='sid')
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features3():
config.set_feature_name('f3')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = pd.read_csv(config.test_feature_file)
else:
df = merge_raw_data()
logger.info('generating feature f3.')
trn, tst = generate_f3(df)
logger.info('saving the training and test f3 features.')
trn.to_csv(config.train_feature_file, index=False)
tst.to_csv(config.test_feature_file, index=False)
y = trn['click_mode'].values
sub = tst[['sid']].copy()
trn.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
tst.drop(['sid', 'pid', 'click_mode'], axis=1, inplace=True)
return trn, y, tst, sub
def get_train_test_features4():
config.set_feature_name('f4')
if os.path.exists(config.train_feature_file) and os.path.exists(config.test_feature_file):
logger.info('loading the training and test features from files.')
trn = pd.read_csv(config.train_feature_file)
tst = | pd.read_csv(config.test_feature_file) | pandas.read_csv |
import pandas as pd
import numpy as np
def construct_freq_df(df_copy):
'''
    Construct a dataframe such that indices are separated by delta 1 min from the Market Data
and put it in a format that markov matrices can be obtained by the pd.crosstab() method
'''
#This is here in case user passes the actual dataframe, we do not want to modify the actual dataframe
df = df_copy.copy()
#Blank dataframe placeholder
frames = pd.DataFrame()
#Set the index to timestamp and convert it to pd timestamp
#The datatype of the timestamp column should be string
df.set_index('timestamp', inplace=True)
df.index = pd.to_datetime(df.index)
    #We need to get customer behaviour from entry to checkout for each unique customer
for customer in df['customer_no'].unique():
#get customer
temp_df = df[df['customer_no'] == customer]
        #expand timestamp index such that delta T is 1 min, and forward fill aisles
temp_df = temp_df.asfreq('T',method='ffill')
        #insert 'entry' 1 min before first aisle
#re sort index so that times make sense
#(WE MIGHT NEED TO SKIP THIS NOT SURE IF ENTRY STATE IS REQUIRED)
temp_df.loc[temp_df.index[0] - pd.to_timedelta('1min')] = [customer,'entry']
temp_df.sort_index(inplace=True)
#after is simply a shift(-1) of current location
#checkout location does not have an after, so drop the NA's here
temp_df['after'] = temp_df['location'].shift(-1)
temp_df.dropna(inplace=True)
#join the frequency table for each customer
frames = pd.concat([frames, temp_df], axis=0)
#return the frequency frame
return frames
def generate_markov_matrix(df_copy):
'''
    Generate the Markov Matrix for a Market Data dataframe structured by the construct_freq_df() function.
    NOTE: Columns indicate the current state, rows indicate the after state, and probabilities are read as current -> after.
    Each column should sum to 1. Since the checkout state is a sink, its after probabilities are all 0 and are not calculated.
'''
df = df_copy.copy()
return pd.crosstab(df['after'], df['location'], normalize=1)
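# Illustrative sketch (not part of the original module): how pd.crosstab(..., normalize=1) turns
# current-location/after-location pairs into a column-stochastic transition matrix. The four
# toy observations below are made up.
def _demo_generate_markov_matrix():
    toy = pd.DataFrame({'location': ['entry', 'entry', 'fruit', 'dairy'],
                        'after':    ['fruit', 'dairy', 'checkout', 'checkout']})
    # columns are the current state, rows the after state; every column sums to 1
    return generate_markov_matrix(toy)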
class Customer:
def __init__(self, idn, state, transition_mat):
self.id = idn
self.state = state
self.transition_mat = transition_mat
self.tr_array_dict = {
'dairy' : self.transition_mat[0,:],
'drinks' : self.transition_mat[1,:],
'entry' : self.transition_mat[2,:],
'fruit' : self.transition_mat[3,:],
'spices' : self.transition_mat[4,:]
}
def __repr__(self):
"""
Returns a csv string for that customer.
"""
return f'{self.id};{self.state}'
def is_active(self):
"""
Returns True if the customer has not reached the checkout
for the second time yet, False otherwise.
"""
if self.state != 'checkout':
return True
if self.state == 'checkout':
return False
def next_state(self):
"""
Propagates the customer to the next state
using a weighted random choice from the transition probabilities
conditional on the current state.
Returns nothing.
"""
self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])
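# Illustrative sketch (not part of the original module): driving a single Customer with a
# hand-made 5x5 transition matrix. Row order must match tr_array_dict (dairy, drinks, entry,
# fruit, spices) and column order must match the choices in next_state (checkout, dairy,
# drinks, fruit, spices). All probabilities below are invented.
def _demo_customer_walk():
    toy_matrix = np.array([
        [0.4, 0.1, 0.2, 0.2, 0.1],   # from dairy
        [0.4, 0.2, 0.1, 0.2, 0.1],   # from drinks
        [0.0, 0.3, 0.2, 0.3, 0.2],   # from entry (nobody checks out immediately)
        [0.4, 0.2, 0.2, 0.1, 0.1],   # from fruit
        [0.4, 0.2, 0.2, 0.1, 0.1],   # from spices
    ])
    customer = Customer(1, 'entry', toy_matrix)
    while customer.is_active():
        customer.next_state()
    return customer  # repr like '1;checkout'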
class SuperMarket:
"""manages multiple Customer instances that are currently in the market.
"""
def __init__(self,transition_matrix):
#List contains the customer objects
self.customers = []
        #Timing stuff set to some defaults, open and close time get their values from the simulate() method when called
self.open_time = pd.to_datetime('08:00',format='%H:%M')
self.close_time = pd.to_datetime('17:00',format='%H:%M')
self.current_time = pd.to_datetime('08:00',format='%H:%M')
#Customer id counter, so that we can consistently assign ids to new customers
self.last_id = 0
#current and total state during a simulation, total state is initiated like this because it becomes the header of a dataframe
        #when returned from results() method, also it needs to be in 1x3 shape for np.vstack() to work in update_total_state()
self.current_state = np.array([])
self.total_state = np.array(['timestamp','customer_id','customer_location'])
#transition matrix is assigned when initiating the SuperMarket object
self.transition_matrix = transition_matrix
def __repr__(self):
pass
def write_current_state(self):
"""
writes the current state during a simulation. Makes rows with current time, customer.id and customer.state of current customers in the market
"""
self.current_state = np.array([[self.current_time, customer.id, customer.state] for customer in self.customers])
def update_total_state(self):
"""
updates the total state, this is constantly updated by the current state during a simulation which yields the final data from the simulation
can be directly accessed or returned as a neat dataframe by the results() method
"""
self.total_state = np.vstack((self.total_state,self.current_state))
def next_minute(self):
"""propagates all customers to the next state. Adds one minute to current time and updates all customers in the market to their next state
"""
self.current_time += pd.Timedelta(1,'m')
#self.customers = [customer.next_state() for customer in self.customers]
for customer in self.customers:
customer.next_state()
#return get_time()
def add_new_customers(self, n_customers):
"""randomly creates new customers. Adds n_customer number of customers to the current list, they all start at the entry, and assigned
an id that is +1 of the current id. Afterwards updates the last id by the latest customer
"""
self.customers = self.customers + [Customer(self.last_id + 1 + i, 'entry', self.transition_matrix) for i in range(n_customers)]
self.last_id = self.customers[-1].id
def remove_exiting_customers(self):
"""removes every customer that is not active any more. Goes through the customer list and if they are active keeps them,
the ones in checkout are dropped
"""
self.customers = [customer for customer in self.customers if customer.is_active() == True]
def count_checkout(self):
"""
counts the number of customers that are at checkout at the current_state. This would be easier if current_state was a dataframe
        but since it is a numpy matrix we select the rows whose 3rd column is 'checkout', then we "pseudo count" them by looking at the shape
"""
row_mask = (self.current_state[:,2] == 'checkout')
return self.current_state[row_mask,:].shape[0]
def simulate(self,initial_customers=20,open_time='8:00',close_time='8:10'):
"""
Simulates the SuperMarket. Gets initial customers, opening time and closing time from the user
"""
self.current_state = np.array([])
self.total_state = np.array(['timestamp','customer_id','customer_location'])
#Timing stuff
self.open_time = | pd.to_datetime(open_time,format='%H:%M') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 11:32:39 2019
@author: github.com/sahandv
"""
import sys, os, time
import platform
from pathlib import Path
def scopus_initialize(ignore_py_version = False,
ignore_scopus_config = False):
if platform.system() != 'Linux':
print("\n * It is recommended to use a Linux system.")
if ignore_py_version is False:
try:
assert sys.version_info >= (3, 5)
except AssertionError:
sys.exit("\n * Please use Python 3.5 +")
try:
import mmap
except ImportError:
sys.exit("\n * Please install mmap.")
try:
import tqdm
except ImportError:
sys.exit("\n * Please install tqdm.")
try:
import pandas
except ImportError:
sys.exit("\n * Please install pandas.")
try:
import nltk
except ImportError:
sys.exit("\n * Please install nltk.")
if ignore_scopus_config is False:
try:
import scopus
except ImportError:
sys.exit("\n * Please install scopus package before using this code. Try usng 'pip install scopus'.")
my_file = Path(str(Path.home())+'/.scopus/config.ini')
if my_file.is_file():
print("\n * Configuration file already exists at "+str(Path.home())+'/.scopus/config.ini'+". You may the file and edit the entries manually.")
else:
scopus.utils.create_config()
def search_scopus(query, download = True):
from scopus import ScopusSearch
result = ScopusSearch(query,download = download)
return result
def retrieve_abstract_try(eid,view = 'REF',param = 'references'):
from scopus import AbstractRetrieval
try:
refs = AbstractRetrieval(eid, view = view)._json[param]
except KeyError:
print('An error occurred (1) ...')
return 1
except UnboundLocalError:
print('An error occurred (2). Probably an empty eID provided? ')
return 2
except KeyboardInterrupt:
sys.exit("Interrupting due to user command.")
except:
print('An error occurred (?)...')
return 0
else:
return refs
# =============================================================================
# Get references for document by eID
#
# eid : the publication scopus eid
# retry : the number of retries if it fails due to some reason like quota
# force : don't prompt to fail the program and go more than retry limit
# =============================================================================
def get_refs_by_eid(eid, retry = 1, force = False):
# from scopus import AbstractRetrieval
import pandas as pd
refs = None
for i in range(retry+1):
refs = retrieve_abstract_try(eid)
if refs == 1 or refs == 2 or (refs == 0 and retry == 0):
print('Returning None.')
return None, None, None
if refs == 0:
print("Trying again: " , i ," of ", retry)
if i >= retry and force is False:
print("How many more retries do you want? [0 means break] ")
input_val = input()
input_val = int(input_val)
if input_val == 0:
print('Returning None.')
return None, None, None
else:
get_refs_by_eid(eid, input_val, force)
if refs != 1 and refs != 2 and refs != 0:
break
try:
ref_list_full = refs['reference']
ref_count = refs['@total-references']
except TypeError:
print('Returning None.')
return None, None, None
if ref_count == '1' or ref_count == 1:
        print('The article has only 1 reference.')
ref_list_full = [ref_list_full]
if ref_count == '0' or ref_count == 0:
        print('The article has 0 references! Returning None and ignoring.')
return None, None, None
ref_list_full_df = pd.DataFrame(ref_list_full)
try:
ref_list_eid = ref_list_full_df['scopus-eid']
except KeyError:
ref_list_eid = None
return ref_list_eid,ref_list_full,ref_count
# =============================================================================
# Fetch references for a series of eIDs
# =============================================================================
def get_refs_from_publications_df(dataframe_eid,verbose = True,retry = 0, force = False):
import pandas as pd
import psutil
memory = psutil.virtual_memory()
if len(dataframe_eid) > 10000 and memory[1]/1000000000 < 8:
input("There are too many records to fetch (>10k) for your free memory. Please free up memory or press Enter to continue anyway.")
all_refs = []
valid_eids = []
for eid in dataframe_eid:
if verbose is True:
print('Fetching references for ',eid)
refs_eid, _, _, = get_refs_by_eid(eid,retry,force)
if refs_eid is not None:
all_refs.append(refs_eid)
valid_eids.append(eid)
return valid_eids,all_refs
# =============================================================================
# Get number of lines in file
# =============================================================================
def get_num_lines(file_address):
import mmap
fp = open(file_address, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
# =============================================================================
# Read author properties from 'author' field
# =============================================================================
def read_author(author_field,retry_no=0):
import json
retry_no+=1
author_json = None
author_field = author_field.replace("\'", "\"")
author_field = author_field.replace("None", "\"\"")
try:
author_json = json.loads(author_field)
except json.JSONDecodeError as decodererror:
if 'Expecting value' in decodererror.msg:
author_field = author_field[1:-1]
            author_json = read_author(author_field, retry_no)
if retry_no>4:
return author_json
# if 'Extra data' in decodererror.msg:
# author_field = author_field[2:-2]
# author_field = author_field.split('}, {')
# author_json = []
# for row in author_field:
# row = '{'+row+'}'
# try:
# author_json.append(json.loads(row))
# except json.JSONDecodeError as decodererror:
# print(decodererror.args,'\n',row)
return author_json
# =============================================================================
# Turn author dictionary to string in WoS format
# =============================================================================
def authors_to_string(authors_row):
if authors_row != '':
strings = []
for author in authors_row:
strings.append(author['surname']+', '+author['initials'].replace('.',''))
return '; '.join(strings)
else:
return ''
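# Illustrative sketch (not part of the original module): read_author() expects the raw string
# stored in the 'author' field (single quotes, possible None values) and authors_to_string()
# expects the resulting list of dicts. The sample record below is made up.
def _demo_author_helpers():
    raw = "[{'surname': 'Doe', 'initials': 'J.D.', 'affiliation': None}]"
    authors = read_author(raw)
    return authors_to_string(authors)  # -> 'Doe, JD'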
# =============================================================================
# Turn JSON data to Pandas DataFrame
# =============================================================================
def json_to_df(file_address = None,verbose = True):
import json
from tqdm import tqdm
import pandas as pd
if file_address is None:
from stat import S_ISREG, ST_CTIME, ST_MODE
dir_path = str(Path.home())+'/.scopus/scopus_search/COMPLETE/'
data = (os.path.join(dir_path, fn) for fn in os.listdir(dir_path))
data = ((os.stat(path), path) for path in data)
data = ((stat[ST_CTIME], path)
for stat, path in data if S_ISREG(stat[ST_MODE]))
print("\n * No json file was supplied. Searching the scopus cache files...")
for cdate, path in sorted(data):
print(' - ', time.ctime(cdate), os.path.basename(path))
file_address = path
if file_address is not None:
print("Will process ",file_address,'. File size: ',int(os.path.getsize(file_address)/1000000),'MB')
try:
input("Press enter to process...")
except SyntaxError:
pass
else:
sys.exit("Please supply a json file. The cache was also empty. :(")
all_publications = []
counter = 0
with open(file_address) as publication_data_text:
for line in tqdm(publication_data_text, total=get_num_lines(file_address)):
publication_data_dict = json.loads(line)
try:
publication_data_dict["author_keywords"] = publication_data_dict['authkeywords'].split(" | ")
except KeyError:
if verbose is True:
print(counter," - This publication has no author keywords!")
all_publications.append(publication_data_dict)
counter+=1
print("\nFinished parsing.")
return | pd.DataFrame(all_publications) | pandas.DataFrame |
# Hello, welcome to the EF diet calculator
# You only need to change two numbers to use it:
heat_required = 2500 # change the number 2500 to the total amount of energy you need to take in
required_cp_ratio=2.5 # if you want the energy provided by carbohydrates to be, for example, three times the energy provided by protein, change the 2 to 3
# Add the food data you need following this format
# '鸡胸肉' (chicken breast): [7.72, 0, 29.55] means:
# food name: 鸡胸肉 (chicken breast)
# every 100g of chicken breast contains 7.72g of fat
# every 100g of chicken breast contains 0g of carbohydrates
# every 100g of chicken breast contains 29.55g of protein
# If you want to add data, append a new entry after '蒸南瓜' (steamed pumpkin): [0.07, 5.33, 0.8]
# Note: the entries for different foods must be separated by commas
# food keys kept in Chinese as in the original: 鸡胸肉 = chicken breast, 米饭 = cooked rice, 红薯 = sweet potato, 牛排 = steak, 蒸南瓜 = steamed pumpkin, 面包 = bread, 可口可乐 = Coca-Cola
data_dict = {'鸡胸肉': [7.72, 0, 29.55], '米饭': [0.33, 25.86, 2.60], '红薯': [0.05, 20.45, 1.57], '牛排': [15.01, 0, 27.29], '蒸南瓜': [0.07, 5.33, 0.8], '面包': [3.29, 50.61, 7.64], '可口可乐': [0, 10.6, 0]}
# You do not need to change anything below this line
#！！！！！！！！！！！
import numpy as np
import pandas as pd
import time
start=time.time()
def data_function(data_dict):
data = pd.DataFrame(data_dict)
data.shape[1]
for i in range(data.shape[0]):
data.loc[i] = | pd.to_numeric(data.iloc[i, :], errors='coerce') | pandas.to_numeric |
import re
import time
from datetime import datetime
import json
import numpy as np
import pandas as pd
from utilities.canvas_api import (
get_courses,
get_outcome_results,
get_course_users,
get_users,
get_enrollment_terms,
)
from utilities.cbl_calculator import calculate_traditional_grade
from utilities.db_functions import (
insert_grades_to_db,
create_record,
delete_outcome_results,
upsert_alignments,
upsert_users,
upsert_outcome_results,
upsert_outcomes,
upsert_courses,
query_current_outcome_results,
get_db_courses,
insert_course_students,
delete_course_students,
delete_grades_current_term,
upsert_enrollment_terms,
get_calculation_dictionaries,
get_current_term,
get_sync_terms
)
def make_grade_object(grade, outcome_avgs, record_id, course, user_id):
"""
Creates grade dictionary for grades table
:param grade: Letter Grade
:param outcome_avgs: List of outcome average info dictionaries
:param record_id: Current Record ID
:param course: Course Dictionary
:param user_id: User ID
:return: return dictionary formatted for Grades Table
"""
# store in a dict
grade = dict(
user_id=user_id,
course_id=course["id"],
grade=grade["grade"],
threshold=grade["threshold"],
min_score=grade["min_score"],
record_id=record_id,
outcomes=outcome_avgs,
)
return grade
def outcome_results_to_df_dict(df):
return df.to_dict("records")
def make_empty_grade(course, grades_list, record_id, user_id):
"""
Create empty grade for non-graded courses
:param course: course dictionary
:param grades_list: list to append empty grade to
:param record_id: record ID
:param user_id: user_id
:return:
"""
empty_grade = {"grade": "n/a", "threshold": None, "min_score": None}
grade = make_grade_object(empty_grade, [], record_id, course, user_id)
grades_list.append(grade)
def make_outcome_result(outcome_result, course_id, enrollment_term):
temp_dict = {
"id": outcome_result["id"],
"score": outcome_result["score"],
"course_id": course_id,
"user_id": outcome_result["links"]["user"],
"outcome_id": outcome_result["links"]["learning_outcome"],
"alignment_id": outcome_result["links"]["alignment"],
"submitted_or_assessed_at": outcome_result["submitted_or_assessed_at"],
"last_updated": datetime.utcnow(),
# 'enrollment_term': enrollment_term
}
return temp_dict
def format_outcome(outcome):
try:
temp_dict = {
"id": outcome["id"],
"display_name": outcome["display_name"],
"title": outcome["title"],
"calculation_int": outcome["calculation_int"],
}
except:
temp_dict = {
"id": outcome["id"],
"display_name": outcome["display_name"],
"title": outcome["title"],
"calculation_int": 65,
}
print("*****************")
print("there was a calc_int error")
return temp_dict
return temp_dict
def format_alignments(alignment):
ids = ["id", "name"]
return {_id: alignment[_id] for _id in ids}
def pull_outcome_results(current_term, engine):
# get all courses for current term todo - pull from database
current_term_id = current_term["id"]
courses = get_courses(current_term_id)
# get outcome result rollups for each course and list of outcomes
pattern = "@dtech|Teacher Assistant|LAB Day|FIT|Innovation Diploma FIT"
count = 0
for idx, course in enumerate(courses):
print(course["id"])
print(f'{course["name"]} is course {idx + 1} our of {len(courses)}')
# Check if it's a non-graded course
if re.match(pattern, course["name"]):
print(course["name"])
continue
# get course users
users = get_course_users(course)
user_ids = [user["id"] for user in users]
outcome_results, alignments, outcomes = get_outcome_results(
course, user_ids=user_ids
)
# Format results, Removed Null filter (works better for upsert)
res_temp = [
make_outcome_result(outcome_result, course["id"], current_term_id)
for outcome_result in outcome_results
]
        # filter out any duplicates (this shouldn't be an issue, but a duplicate sometimes shows up)
done = []
outcome_results = []
for res in res_temp:
if res['id'] not in done:
done.append(res['id'])
outcome_results.append(res)
# outcome_results = [i for n, i in enumerate(outcome_results) if i not in outcome_results[n + 1:]]
# Format outcomes
outcomes = [format_outcome(outcome) for outcome in outcomes]
# Filter out duplicate outcomes
outcomes = [
val for idx, val in enumerate(outcomes) if val not in outcomes[idx + 1 :]
]
# format Alignments
alignments = [format_alignments(alignment) for alignment in alignments]
# filter out duplicate alignments
alignments = [
val
for idx, val in enumerate(alignments)
if val not in alignments[idx + 1 :]
]
# If there are results to upload
if outcome_results:
upsert_outcomes(outcomes, engine)
print("outcome upsert complete")
upsert_alignments(alignments, engine)
print("alignment upsert complete")
print(f'deleting outcome_results for {course["name"]}')
delete_outcome_results(course["id"], engine)
print("old outcome results deleted")
print(f"outcomes results to upload: {len(outcome_results)}")
upsert_outcome_results(outcome_results, engine)
print("result upsert complete")
# count = count + 1
# if count > 1:
# break
def insert_grades(current_term, engine):
print(f"Grade pull started at {datetime.now()}")
calculation_dictionaries = get_calculation_dictionaries(engine)
# check if the cut off date has been set
if current_term["cut_off_date"]:
cut_off_date = current_term["cut_off_date"]
# if not, set it to the last day of the term
else:
cut_off_date = current_term["end_at"]
outcome_results = query_current_outcome_results(current_term["id"], engine)
# Check if there are any outcome results in the current_term. If not exit.
if outcome_results.empty:
print(f"Term {current_term['id']} has no outcome results.")
return None
drop_eligible_results = outcome_results.loc[
outcome_results["submitted_or_assessed_at"] < cut_off_date
]
# get min score from drop_eligible_results
group_cols = ["links.user", "course_id", "outcome_id"]
min_score = (
drop_eligible_results.groupby(group_cols)
.agg(min_score=("score", "min"))
.reset_index()
)
full_avg = (
outcome_results.groupby(group_cols)
.agg(full_avg=("score", "mean"), count=("score", "count"), sum=("score", "sum"))
.reset_index()
)
outcome_avgs = pd.merge(min_score, full_avg, on=group_cols)
outcome_avgs["drop_avg"] = (outcome_avgs["sum"] - outcome_avgs["min_score"]) / (
outcome_avgs["count"] - 1
)
# Pick the higher average
outcome_avgs["outcome_avg"] = np.where(
outcome_avgs["drop_avg"] > outcome_avgs["full_avg"],
outcome_avgs["drop_avg"],
outcome_avgs["full_avg"],
)
# calculate the grades
group_cols = ["links.user", "course_id"]
grades = outcome_avgs.groupby(group_cols).agg(
grade_dict=(
"outcome_avg",
lambda x: calculate_traditional_grade(x, calculation_dictionaries),
)
)
grades.reset_index(inplace=True)
# format the grades
grades[["threshold", "min_score", "grade"]] = pd.DataFrame(
grades["grade_dict"].values.tolist(), index=grades.index
)
# Make a new record
print(f"Record created at {datetime.now()}")
record_id = create_record(current_term["id"], engine)
grades["record_id"] = record_id
# Create grades_dict for database insert
grades.rename(columns={"links.user": "user_id"}, inplace=True)
grade_cols = [
"course_id",
"user_id",
"threshold",
"min_score",
"grade",
"record_id",
]
    grades_list = grades[grade_cols].to_dict("records")
# Delete grades from current term
delete_grades_current_term(current_term["id"], engine)
# Insert into Database
if len(grades_list):
insert_grades_to_db(grades_list, engine)
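# Illustrative sketch (not part of the original pipeline): the drop-the-lowest-score averaging
# used above, reproduced for a single hypothetical user/course/outcome group.
def _demo_drop_lowest_average():
    scores = pd.Series([2.0, 3.0, 3.5, 4.0])
    full_avg = scores.mean()                                        # 3.125
    drop_avg = (scores.sum() - scores.min()) / (len(scores) - 1)    # 3.5
    # the pipeline keeps whichever of the two averages is higher
    return max(full_avg, drop_avg)  # -> 3.5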
def calc_outcome_avgs(outcome_results):
"""
Calculates outcome averages with both simple and weighted averages,
choosing the higher of the two
:param outcome_results:
:return: DataFrame of outcome_averages with max of two averages
"""
group_cols = ["links.user", "outcome_id", "course_id"]
# Calculate the average without dropping the low score
no_drop_avg = (
outcome_results.groupby(group_cols)
.agg(no_drop_score=("score", "mean"))
.reset_index()
)
# Calculate the average without dropping the low score
outcome_results_drop_min = outcome_results[outcome_results["rank"] != 1.0]
drop_avg = (
outcome_results_drop_min.groupby(group_cols)
.agg(drop_score=("score", "mean"))
.reset_index()
)
# Merge them together
outcome_averages = | pd.merge(no_drop_avg, drop_avg, on=group_cols) | pandas.merge |
import pandas_datareader.data as web
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import tweepy
from ibm_watson import PersonalityInsightsV3
import json
import pandas as pd
import time
# Credentials for APIs and auth callers
# (the TWITTER_* credential constants below are assumed to be defined elsewhere, e.g. in a config module not shown here)
TWITTER_AUTH = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY,TWITTER_CONSUMER_SECRET)
TWITTER_AUTH.set_access_token(TWITTER_ACCESS_TOKEN,TWITTER_ACCESS_TOKEN_SECRET)
TWITTER = tweepy.API(TWITTER_AUTH)
# Placeholder data
# df = pd.read_csv('austen_with_time.csv')
app = dash.Dash()
app.layout = html.Div(children=[
# Hidden div that stores cached state, the intermediate value
html.Div(id='intermediate-value', style={'display': 'none'}),
html.Div(children='''
Enter twitter handle:
'''),
dcc.Input(id='input', value='elonmusk', type='text'),
html.Div(id='overall-graph'),
html.Div(id='personality-graph'),
html.Div(id='needs-graph'),
html.Div(id='values-graph'),
])
# Get data, process it, and make it available to other callbacks
@app.callback(Output('intermediate-value', 'children'),
[Input('input', 'value')]
)
def get_and_process_data(input):
# WARNING: this is a very expensive step
# API call and store data
# Do not set the num_periods over 5, it will eat up the API
num_periods = 5
name = 'elonmusk'
df_a = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import argparse
import tempfile
import pandas as pd
import sys_tool
from allele_util import AlleleUtil
from coord_util import CoordUtil
from tf_util import TfUtil
from dhs_util import MasterDhsUtil, UniformDhsUtil
from phastcons_util import PhastconsUtil
from tss_util import TssDistUtil
from eqtl_util import EqtlUtil
from gerp_util import GerpUtil
from genome_seg_util import GenomeSegUtil
from dna_shape_gc_content_util import DnaShapeGcContentUtil
from eigen_util import EigenUtil
from nki_lad_util import NkiLadUtil
from repli_chip_util import RepliChipUtil
from sanger_tfbs_util import SangerTfbsUtil
from vista_enhancer_util import VistaEnhancerUtil
from gwava_util import GwavaUtil
from augmented_feature_util import AugmentedUtil
if sys.version_info[0] == 3:
# Using Python3
from functools import reduce
# `reduce` is a built-in function in Python2
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract BEF features.", allow_abbrev=False)
parser.add_argument('-r', '--rsnp', dest='r_src', type=str, required=True,
help="input file of rSNP rsid")
parser.add_argument('-c', '--csnp', dest='c_src', type=str, required=True,
help="input file of cSNP rsid")
parser.add_argument('-f', '--feature', dest='f_dest', type=str, required=True,
help="output file of the feature matrix")
args = parser.parse_args()
print("[cerenkov_bef] RSNP src: {}; CSNP src: {}; Feature Dest: {};".
format(args.r_src, args.c_src, args.f_dest))
# ----- 0. READ RSID -----
rsnp_fn = args.r_src
rsnp_dfm = pd.read_table(rsnp_fn, header=0, usecols=['name'])
rsnp_dfm = rsnp_dfm.assign(label=1)
csnp_fn = args.c_src
csnp_dfm = | pd.read_table(csnp_fn, header=0, usecols=['name']) | pandas.read_table |
"""
A nowtrade module to enables pulling stock/currency data from external sources.
Also makes it easy to store this data locally for future strategy testing.
"""
import urllib.request
import urllib.error
import zipfile
import datetime
from io import BytesIO
import pandas_datareader.data as web
import pandas as pd
from pandas import read_csv
from nowtrade import logger
class NoDataException(Exception):
"""
Exception used when no data could be gathered from a data connection.
"""
pass
class DataConnection(object):
"""
Base class for all data connections.
"""
def __init__(self):
self.logger = logger.Logger(self.__class__.__name__)
self.logger.info('Initialized')
def __str__(self):
return self.__class__.__name__
class YahooConnection(DataConnection):
"""
Utilizes Pandas' Remote Data Access methods to fetch
symbol data from Yahoo.
"""
def get_data(self, symbol, start, end):
"""
@type symbol: string
@type start: datetime
@type end: datetime
@return: Returns a pandas DataFrame of the requested symbol
@rtype: pandas.DataFrame
"""
ret = web.DataReader(str(symbol).upper(), 'yahoo', start, end)
ret.rename(columns=lambda name: '%s_%s' % (symbol, name), inplace=True)
return ret
class GoogleConnection(DataConnection):
"""
Utilizes Pandas' Remote Data Access methods to fetch
symbol data from Google.
"""
def _request(self, url):
"""
Used for custom request outside of Pandas framework.
"""
try:
return urllib.request.urlopen(url)
        except urllib.error.HTTPError as error:
            print('Error when connecting to Google servers: %s' % error)
except IOError as error:
print('Could not connect to Google servers with url %s: %s' %
(url, error))
except Exception as error: # pylint: disable=broad-except
print(
'Unknown Error when trying to connect to Google servers: %s' % error)
def get_data(self, symbol, start, end, symbol_in_column=True):
"""
@type symbol: string
@type start: datetime
@type end: datetime
@return: Returns a pandas DataFrame of the requested symbol
@rtype: pandas.DataFrame
"""
ret = web.DataReader(str(symbol).upper(), 'google', start, end)
if symbol_in_column:
ret.rename(columns=lambda name: '%s_%s' %
(symbol, name), inplace=True)
return ret
def get_ticks(self, symbol, period='15d', interval=60, symbol_in_column=True):
"""
Always returns 15 days worth of 1min data.
Get tick prices for the given ticker symbol.
@param symbol: symbol symbol
@type symbol: string
"""
symbol = str(symbol).upper()
data = None # Return data
url = 'http://www.google.com/finance/getprices?i=%s&p=%s&f=d,o,h,l,c,v&q=%s' \
% (interval, period, symbol)
page = self._request(url)
        entries = page.readlines()[7:]  # the first 7 lines are document information
days = [] # Keep track of all days
day = None # Keep track of current day
date = None # Keep track of current time
# sample values:'a1316784600,31.41,31.5,31.4,31.43,150911'
for entry in entries:
quote = entry.strip().split(',')
if quote[0].startswith('a'): # Datetime
day = datetime.datetime.fromtimestamp(int(quote[0][1:]))
days.append(day)
date = day
else:
date = day + \
datetime.timedelta(minutes=int(quote[0])*interval/60)
if symbol_in_column:
data_frame = pd.DataFrame({'%s_Open' % symbol: float(quote[4]),
'%s_High' % symbol: float(quote[2]),
'%s_Low' % symbol: float(quote[3]),
'%s_Close' % symbol: float(quote[1]),
'%s_Volume' % symbol: int(quote[5])},
index=[date])
else:
data_frame = pd.DataFrame({'Open': float(quote[4]),
'High': float(quote[2]),
'Low': float(quote[3]),
'Close': float(quote[1]),
'Volume': int(quote[5])},
index=[date])
if data is None:
data = data_frame
else:
data = data.combine_first(data_frame)
# Reindex for missing minutes
new_index = None
for day in days:
index = pd.date_range(start=day, periods=391, freq='1Min')
if new_index is None:
new_index = index
else:
                new_index = new_index.union(index)
# Front fill for minute data
return data.reindex(new_index, method='ffill')
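# Illustrative sketch (not part of the original module): the reindex + forward-fill step used in
# get_ticks(), shown on a tiny hand-made frame so it can be run without hitting Google's servers.
def _demo_minute_reindex():
    bars = pd.DataFrame({'Close': [10.0, 10.5]},
                        index=pd.to_datetime(['2020-01-02 09:30', '2020-01-02 09:32']))
    full_index = pd.date_range(start='2020-01-02 09:30', periods=3, freq='1Min')
    # the missing 09:31 bar is filled with the last known value (10.0)
    return bars.reindex(full_index, method='ffill')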
class OandaConnection(DataConnection):
"""
Data connection used to gather data from the Oanda forex broker.
"""
def __init__(self, account_id, access_token, environment='practice'):
DataConnection.__init__(self)
import oandapy # pylint: disable=import-error
self.account_id = account_id
self.environment = environment
self.oanda = oandapy.API(
environment=environment, access_token=access_token)
def __str__(self):
return 'OandaConnection(account_id=%s, access_token=******, environment=%s)' \
% (self.account_id, self.environment)
def __repr__(self):
return 'OandaConnection(account_id=%s, access_token=******, environment=%s)' \
% (self.account_id, self.environment)
def get_data(self, symbol, granularity='H1', periods=5000,
realtime=False, symbol_in_column=True):
"""
Gets the dataframe containing all of the currency data requested.
"""
self.logger.info('Getting %s candles of %s data for %s granularity \
(realtime=%s, symbol_in_column=%s)'
% (periods, symbol, granularity, realtime, symbol_in_column))
candles = self.oanda.get_history(account_id=self.account_id,
instrument=symbol,
granularity=granularity,
count=periods)['candles']
if not realtime:
candles.pop()
data = None
for candle in candles:
date = datetime.datetime.strptime(
candle['time'], "%Y-%m-%dT%H:%M:%S.000000Z")
if symbol_in_column:
data_frame = pd.DataFrame({'%s_Open' % symbol: candle['openBid'],
'%s_High' % symbol: candle['highBid'],
'%s_Low' % symbol: candle['lowBid'],
'%s_Close' % symbol: candle['closeBid'],
'%s_Volume' % symbol: candle['volume']},
index=[date])
else:
                data_frame = pd.DataFrame({'Open': candle['openBid'],
                                           'High': candle['highBid'],
                                           'Low': candle['lowBid'],
                                           'Close': candle['closeBid'],
                                           'Volume': candle['volume']},
                                          index=[date])
if data is None:
data = data_frame
else:
data = data.combine_first(data_frame)
self.logger.debug('Data: %s' % data)
return data
class ForexiteConnection(DataConnection):
"""
Forexite 1min data
"""
URL = "http://www.forexite.com/free_forex_quotes/%s/%s/%s%s%s.zip"
#URL = "http://www.forexite.com/free_forex_quotes/YY/MM/DDMMYY.zip"
def get_data(self, start, end):
"""
Always returns 1min OPEN, HIGH, LOW, CLOSE for all available currency
pairs on the Forexite website. No Volume information.
"""
assert start <= end
data = {}
# One day at a time
while start <= end:
day = str(start.day)
if len(day) == 1:
day = '0%s' % day
month = str(start.month)
if len(month) == 1:
month = '0%s' % month
long_year = str(start.year)
year = long_year[2:]
url = self.URL % (long_year, month, day, month, year)
start = start + datetime.timedelta(1)
try:
page = urllib.request.urlopen(url)
except urllib.error.HTTPError as error:
print(error)
continue
            zipf = zipfile.ZipFile(BytesIO(page.read()))
series = read_csv(zipf.open('%s%s%s.txt' %
(day, month, year)), parse_dates=True)
for ticker in series['<TICKER>'].unique():
data_frame = series.loc[series['<TICKER>']
== ticker] # pylint: disable=no-member
first_row = data_frame.iloc[0]
start_date = first_row['<DTYYYYMMDD>']
start_time = first_row['<TIME>']
data_frame.index = pd.date_range(str(start_date) + ' ' +
str(start_time).zfill(6),
periods=len(data_frame),
freq='1Min')
del data_frame['<TICKER>']
del data_frame['<DTYYYYMMDD>']
del data_frame['<TIME>']
def rename_columns(name): return '%s_%s' % (ticker, name.strip(
'<>').capitalize()) # pylint: disable=cell-var-from-loop
data_frame.rename(columns=rename_columns, inplace=True)
if ticker in data:
data[ticker] = data[ticker].combine_first(data_frame)
else:
data[ticker] = data_frame
return data
class MySQLConnection(DataConnection):
"""
MySQL database connection to retrieve data.
Requires a table name that matches the capitalized name of the symbol you
are pulling from. For example, if you wanted to pull data for the 'msft'
symbol, you would need a MySQL table named 'MSFT'.
"""
def __init__(self, host='localhost', port=3306, database='symbol_data',
username='root', password=''):
DataConnection.__init__(self)
import MySQLdb
_db = MySQLdb.connect(host=host,
port=port,
user=username,
passwd=password,
db=database)
self.cursor = _db.cursor()
def get_data(self, symbol, start, end, volume=False,
date_column='date', custom_cols=None):
"""
Returns a dataframe of the symbol data requested.
Assumes a MySQL table exists for the capitalized symbol name.
Assumes you have column names matching the following:
date, open, high, low, close, volume
Volume is optional.
custom_cols is a list of custom column names you want to pull in on top
of the OHLCV data.
"""
if custom_cols is None:
custom_cols = []
query = 'SELECT %s, open, high, low, close' % date_column
if volume:
query += ', volume'
for col in custom_cols:
query += ', %s' % col
query += ' FROM %s WHERE %s >= "%s" AND %s <= "%s"'
query = query % (symbol,
date_column,
start,
date_column,
end)
num_results = self.cursor.execute(query)
if num_results < 1:
raise NoDataException()
results = []
for result in self.cursor.fetchall():
row = {'%s_Date' % symbol: result[0],
'%s_Open' % symbol: result[1],
'%s_High' % symbol: result[2],
'%s_Low' % symbol: result[3],
'%s_Close' % symbol: result[4]}
index = 4
if volume:
index += 1
row['%s_Volume' % symbol] = result[index]
for col in custom_cols:
index += 1
row['%s_%s' % (symbol, col)] = result[index]
results.append(row)
ret = | pd.DataFrame.from_dict(results) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2019-02-19 18:41:56
# @Last Modified by: <NAME>
# @Last Modified time: 29.11.2019
"""
This script demonstrates how a building can be generated by importing building
data from excel.
An appropriate example file with some building data is imported from
examplefiles/ExcelBuildingData_Sample.xlsx.
In the excel every room is listed by its own, via a custom defined zoning
algorithm these rooms are combined to zones.
The user needs to adjust the zoning to his needs.
See # Block: Zoning methodologies (define your zoning function here)
Limitations and assumptions:
- Outer and inner wall area depend on the calculations done in the excel
- Ground floor area is only as big the respective net area of the heated room
volume (NetArea)
- Floor area is only as big the respective net area of the heated room volume
(NetArea)
- Rooftop area is only as big the respective net area of the heated room
volume (NetArea)
- Rooftops are flat and not tilted, see "RooftopTilt"
- Ceiling area is only as big the respective net area of the heated room
volume (NetArea)
- Ceiling, floor and inner walls are only accounted for with half their area,
since they belong half to the respective zone
and half to the adjacent zone
- Orientations are clockwise in degrees, 0° is directed north
- respective construction types have to be added to the TypeBuildingElements.json
- respective UsageTypes for Zones have to be added to the UseConditions.json
- excel file format has to be as shown in the "ExcelBuildingData_Sample.xlsx"
Information about the required excel format:
#Documentation in progress!
- yellowed columns are necessary input to teaser -> don't change the column
headers, keep value names consistent.
-non yellowed columns may either not be used or be used for your zoning
algorithm
- Under the cell "Usage type" you will see some cells that are blank but have
their row filled.
A blank cell means the row still belongs to the Usage type above, but in that
specific row we filled the characteristics
of the window/wall of a different orientation of the same exact room. That
means every row is either a new room or a
new orientation of that room. A room might have two outer walls in two
different orientations, so for each outer wall
an extra row defining the respective orientation is added.
- The entries in the excel sheet must be consistent so that Python is able to
convert them.
- If an inner wall reaches into a room but is not the limit of the room,
it should be accounted for with 2x its area
"""
import os
import warnings
import shutil
import pandas as pd
import numpy as np
from teaser.project import Project
from teaser.logic.buildingobjects.building import Building
from teaser.logic.buildingobjects.thermalzone import ThermalZone
from teaser.logic.buildingobjects.useconditions import UseConditions
from teaser.logic.buildingobjects.buildingphysics.outerwall import OuterWall
from teaser.logic.buildingobjects.buildingphysics.floor import Floor
from teaser.logic.buildingobjects.buildingphysics.rooftop import Rooftop
from teaser.logic.buildingobjects.buildingphysics.groundfloor import GroundFloor
from teaser.logic.buildingobjects.buildingphysics.ceiling import Ceiling
from teaser.logic.buildingobjects.buildingphysics.window import Window
from teaser.logic.buildingobjects.buildingphysics.innerwall import InnerWall
def import_data(path=None, sheet_names=None):
"""
Import data from the building data excel file and perform some
preprocessing for nan and empty cells.
If several sheets are imported, the data is concatenated to one dataframe
Parameters
----------
path: str
path to the excel file that should be imported
sheet_names: list or str
sheets of excel that should be imported
"""
# process an import of a single sheet as well as several sheets,
# which will be concatenated with an continuous index
if type(sheet_names) == list:
data = pd.DataFrame()
_data = pd.read_excel(io=path, sheet_name=sheet_names, header=0, index_col=None)
for sheet in sheet_names:
data = data.append(_data[sheet], sort=False)
data = data.reset_index(drop=False)
data["index"] = data["index"] + 2 # sync the index with the excel index
else:
data = pd.read_excel(io=path, sheet_name=sheet_names, header=0, index_col=0)
# Cut of leading or tailing white spaces from any string in the dataframe
data = data.applymap(lambda x: x.strip() if type(x) is str else x)
# Convert every N/A, nan, empty strings and strings called N/a, n/A, NAN,
# nan, na, Na, nA or NA to np.nan
data = data.replace(
["", "N/a", "n/A", "NAN", "nan", "na", "Na", "nA", "NA"], np.nan, regex=False
)
data = data.fillna(np.nan)
return data
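# Illustrative sketch (not part of the original script): the NA-normalisation that import_data()
# applies, shown on a tiny hand-made frame so no excel file is needed. Values are hypothetical.
def _demo_na_cleanup():
    demo = pd.DataFrame({'RoomIdentifier': [' R1 ', 'R2'], 'UsageType': ['N/a', 'Office']})
    demo = demo.applymap(lambda x: x.strip() if type(x) is str else x)
    # 'N/a' becomes np.nan and ' R1 ' loses its surrounding spaces
    demo = demo.replace(
        ["", "N/a", "n/A", "NAN", "nan", "na", "Na", "nA", "NA"], np.nan, regex=False
    )
    return demo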
def get_list_of_present_entries(list_):
"""
Extracts a list of all in the list available entries, discarding "None"
and "nan" entries
Parameters
----------
list_: list
list that shall be processed
"""
_List = []
for x in list_:
if x not in _List:
            if x is not None:
if not pd.isna(x):
_List.append(x)
return _List
# Block: Zoning methodologies (define your zoning function here)
# -------------------------------------------------------------
def zoning_example(data):
"""
    This is an example of how the rooms of a building could be aggregated to
zones.
In this example the UsageType has to be empty in the case that the
respective line does not represent another
    room but a differently oriented wall or window belonging to a room that
is already declared once in the excel file.
Parameters
----------
data: pandas.dataframe
The data which shall be zoned
return data: pandas.dataframe
The zoning should return the imported dataset with an additional
column called "Zone" which inhibits the
information to which zone the respective room shall be part of,
and also a column called "UsageType_Teaser" which stores the
        UsageType of each row.
UsageType must be available in the UseConditions.json.
"""
# account all outer walls not adjacent to the ambient to the entity
# "inner wall"
# !right now the wall construction of the added wall is not respected,
# the same wall construction as regular
# inner wall is set
for index, line in data.iterrows():
if not pd.isna(line["WallAdjacentTo"]):
data.loc[index, "InnerWallArea[mยฒ]"] = (
data.loc[index, "OuterWallArea[mยฒ]"]
+ data.loc[index, "WindowArea[mยฒ]"]
+ data.loc[index, "InnerWallArea[mยฒ]"]
)
data.loc[index, "WindowOrientation[ยฐ]"] = np.NaN
data.loc[index, "WindowArea[mยฒ]"] = np.NaN
data.loc[index, "WindowConstruction"] = np.NaN
data.loc[index, "OuterWallOrientation[ยฐ]"] = np.NaN
data.loc[index, "OuterWallArea[mยฒ]"] = np.NaN
data.loc[index, "OuterWallConstruction"] = np.NaN
# make all rooms that belong to a certain room have the same room identifier
_list = []
for index, line in data.iterrows():
if pd.isna(line["BelongsToIdentifier"]):
_list.append(line["RoomIdentifier"])
else:
_list.append(line["BelongsToIdentifier"])
data["RoomCluster"] = _list
    # check for lines in which the net area is zero, marking a second wall
    # or window element for the respective room, but in which a UsageType
    # is still stated, which is wrong
    # and should be changed in the file
for i, row in data.iterrows():
if (row["NetArea[mยฒ]"] == 0 or row["NetArea[mยฒ]"] == np.nan) and not pd.isna(
row["UsageType"]
):
warnings.warn(
"In line %s the net area is zero, marking an second wall or "
"window element for the respective room, "
"and in which there is still stated a UsageType which is "
"wrong and should be changed in the file" % i
)
# make all rooms of the cluster having the usage type of the main usage type
_groups = data.groupby(["RoomCluster"])
for index, cluster in _groups:
count = 0
for line in cluster.iterrows():
if | pd.isna(line[1]["BelongsToIdentifier"]) | pandas.isna |
import os
import pandas as pd
#from pandas.util.testing import assert_frame_equal
from rdkit import Chem
from rdkit.Chem import AllChem
import metamoles
from metamoles import cheminform
#from metamoles import *
#Tests for the RDKit molecular similarity functions
#Requires metamoles/test/playground_df_cleaned_kegg_with_smiles.csv to be in the same directory for tests to pass.
data_path = os.path.join(metamoles.__path__[0], 'data')
def test_input_data():
"""Tests input_data function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.input_data(input_df)
assert isinstance(test_df, pd.DataFrame) == True, """TypeError,
function should return a pandas dataframe"""
#assert
return '1/1 tests successful'
def test_fingerprint_products():
"""Tests fingerprint_products function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.input_data(input_df)
assert isinstance(cheminform.fingerprint_products(test_df), pd.DataFrame) == True, """TypeError,
function should return a pandas dataframe"""
#assert
return '1/1 tests successful'
def test_sim_i_j():
"""Tests sim_i_j function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))
A = test_df.iloc[0]
#B = test_df.iloc[1]
#C = test_df.iloc[2]
assert cheminform.sim_i_j(A, A) == 1, "Self correlation is broken"
#assert metamoles.sim_i_j(A, B) == -1, "Standard correlation is broken"
#assert metamoles.sim_i_j(A, C) == 0, "Standard correlation is broken"
return '1/1 tests successful'
def test_sim_i_all():
"""Test sim_i_all function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))
metric = pd.DataFrame()
assert metric.empty == True, """ShapeError, input metric dataframe
should be initialized as empty"""
for index, row in test_df.iterrows():
assert cheminform.sim_i_all(test_df, index, row, metric) == None, """OutputError, function
shouldn't return anything"""
assert metric[index].all() >= 0 and metric[index].all() <= 1.0, """ValueError,
metric should be between 0 and 1"""
return "3/3 Tests successful"
def test_sim_metric():
"""Test sim_i_all function in metamoles.py"""
input_df = pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv")
test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))
assert isinstance(cheminform.sim_metric(test_df), pd.DataFrame) == True, """TypeError,
function should return a dataframe"""
assert cheminform.sim_metric(test_df).isnull().values.any() == False, """ValueError,
function-generated dataframe should not contain null values"""
#assert test_df.size == metamoles.sim_metric(test_df).size, """ShapeError,
#function-generated dataframe should be the same size as input dataframe"""
return "2/2 Tests successful"
def test_calculate_dist():
"""Test calculate_dist function in metamoles.py"""
df = | pd.read_csv(data_path + "/playground_df_cleaned_kegg_with_smiles.csv") | pandas.read_csv |
import csv
from pathlib import Path
import pandas as pd
import numpy as np
import torch as torch
from torch import nn
from torch.nn import functional as F
import sklearn
import time
import wandb
import uuid
import numerapi
import pathlib
import os
import shutil
from functools import reduce
import scipy
from fast_soft_sort.pytorch_ops import soft_rank
from tqdm import tqdm
TARGET_NAME = f"target"
PREDICTION_NAME = f"prediction"
def getImportantFeatures(model,test_data,feature_names):
diff = MDA(model,feature_names,test_data)
keep_features=[]
for i in diff:
if i[1] > 0:
keep_features.append(i[0])
return keep_features
def corrcoef(target, pred):
# np.corrcoef in torch from @mdo
# https://forum.numer.ai/t/custom-loss-functions-for-xgboost-using-pytorch/960
pred_n = pred - pred.mean()
target_n = target - target.mean()
pred_n = pred_n / pred_n.norm()
target_n = target_n / target_n.norm()
return (pred_n * target_n).sum()
def spearman(
    target,
    pred,
    regularization="l2",
    regularization_strength=1.0,
):
    # differentiable Spearman-style correlation: soft-rank the predictions and
    # correlate the scaled ranks with the target
    target = target.view(1, -1)
    pred = pred.view(1, -1)
    pred = soft_rank(
        pred,
        regularization=regularization,
        regularization_strength=regularization_strength,
    )
    return corrcoef(target, pred / pred.shape[-1])
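# Illustrative sketch (not part of the original script): using the differentiable rank
# correlation above as a loss on tiny hypothetical tensors. Assumes fast_soft_sort (imported at
# the top of this file) is installed.
def _demo_soft_spearman():
    target = torch.tensor([0.00, 0.25, 0.50, 0.75, 1.00])
    pred = torch.tensor([0.10, 0.20, 0.35, 0.60, 0.90], requires_grad=True)
    loss = -spearman(target, pred)  # maximise rank correlation by minimising its negative
    loss.backward()
    return loss.item(), pred.grad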
def score_(df):
return correlation(df['prediction'], df['target'])
def numerai_score(df):
scores = df.groupby('era').apply(score_)
return scores.mean(), scores.std(ddof=0)
def MDA(model, features, testSet):
"""
function from https://forum.numer.ai/t/feature-selection-by-marcos-lopez-de-prado/3170
"""
preds=model(torch.from_numpy(testSet[features].to_numpy()).float().view(-1, 1, len(features))).detach().numpy()
testSet['prediction'] = preds # predict with a pre-fitted model on an OOS validation set
corr, std = numerai_score(testSet) # save base scores
print("Base corr: ", corr)
diff = []
np.random.seed(42)
with tqdm(total=len(features)) as progress:
for col in features: # iterate through each features
X = testSet.copy()
            np.random.shuffle(X[col].values) # shuffle a selected feature column, while maintaining the distribution of the feature
inp = torch.from_numpy(X[features].to_numpy()).view(-1, 1, len(features)).float()
testSet['prediction'] = model(inp).detach().numpy()# run prediction with the same pre-fitted model, with one shuffled feature
corrX, stdX = numerai_score(testSet) # compare scores...
# print(col, corrX-corr)
diff.append((col, corrX-corr))
progress.update(1)
return diff
def refresh_numerai_data():
remove("numerai_datasets.zip")
remove("numerai_datasets")
napi = numerapi.NumerAPI(verbosity="info")
napi.download_current_dataset(unzip=True,dest_filename="numerai_datasets")
def get_factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
def determine_fitness_for_batch_size(n):
if n < 4000 and n > 80:
return True
else:
return False
def get_batch_size(total_size):
factors = list(get_factors(total_size))
factors = list(filter(determine_fitness_for_batch_size, factors))
if len(factors) > 0:
return np.max(factors)
else:
return 1
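def _example_get_batch_size():
    # Illustrative example added for clarity (not part of the original script):
    # 4096 has factors 1, 2, 4, ..., 4096; the largest one strictly between
    # 80 and 4000 is 2048, so that becomes the batch size.
    return get_batch_size(4096)  # 2048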
def calculate_multilayer_output_length_conv(layers, length_in, kernel_size, stride=1, padding=0, dilation=1):
for i in range(layers):
length_in = calculate_output_length_conv(length_in, kernel_size, stride, padding, dilation)
return length_in
def get_dataset():
training_data = pd.read_csv("numerai_datasets/numerai_training_data.csv")
target = training_data["target"]
tournament_data = pd.read_csv("numerai_datasets/numerai_tournament_data.csv")
feature_names = [
f for f in training_data.columns if f.startswith("feature")
]
return training_data,tournament_data,feature_names
def remove(path):
""" param <path> could either be relative or absolute. """
if os.path.isfile(path) or os.path.islink(path):
os.remove(path) # remove the file
elif os.path.isdir(path):
shutil.rmtree(path) # remove dir and all contains
else:
raise ValueError("file {} is not a file or dir.".format(path))
class BatchNormResizeLayer(nn.Module):
def __init__(self, lambd):
super(BatchNormResizeLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class CustomConfig:
"""A class for storing configurations that works with wandb."""
def __init__(self, init_dict=None, **kwargs):
self.dict_version={}
for k, v in kwargs.items():
setattr(self, k, v)
self.dict_version[k]=v
if init_dict != None:
for i in init_dict:
self.dict_version[i]=init_dict[i]
setattr(self, i, init_dict[i])
def calculate_output_length_conv(length_in, kernel_size, stride=1, padding=0, dilation=1):
return (length_in + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
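def _example_output_length_conv():
    # Illustrative example added for clarity (not part of the original script):
    # a length-128 input with kernel_size=5, stride=2, padding=2 gives
    # (128 + 4 - 4 - 1) // 2 + 1 = 64, matching torch.nn.Conv1d's output size.
    return calculate_output_length_conv(128, kernel_size=5, stride=2, padding=2)  # 64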
# Submissions are scored by Spearman correlation
def correlation(predictions, targets):
ranked_preds = predictions.rank(pct=True, method="first")
return np.corrcoef(ranked_preds, targets)[0, 1]
# convenience method for scoring
def score(df):
return correlation(df[PREDICTION_NAME], df[TARGET_NAME])
# Payout is just the score clipped at +/-25%
def payout(scores):
return scores.clip(lower=-0.25, upper=0.25)
# to neutralize a column in a df by many other columns on a per-era basis
def neutralize(df,
columns,
extra_neutralizers=None,
proportion=1.0,
normalize=True,
era_col="era"):
# need to do this for lint to be happy bc [] is a "dangerous argument"
if extra_neutralizers is None:
extra_neutralizers = []
unique_eras = df[era_col].unique()
computed = []
for u in unique_eras:
print(u, end="\r")
df_era = df[df[era_col] == u]
scores = df_era[columns].values
if normalize:
scores2 = []
for x in scores.T:
x = (pd.Series(x).rank(method="first").values - .5) / len(x)
scores2.append(x)
scores = np.array(scores2).T
extra = df_era[extra_neutralizers].values
exposures = np.concatenate([extra], axis=1)
else:
exposures = df_era[extra_neutralizers].values
scores -= proportion * exposures.dot(
np.linalg.pinv(exposures.astype(np.float32)).dot(scores.astype(np.float32)))
scores /= scores.std(ddof=0)
computed.append(scores)
return pd.DataFrame(np.concatenate(computed),
columns=columns,
index=df.index)
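def _example_neutralize():
    # Illustrative sketch added for clarity (not part of the original script);
    # the feature name and era labels below are made up. It neutralizes the
    # prediction column against a single feature, separately for each era.
    demo = pd.DataFrame({
        "era": ["era1"] * 4 + ["era2"] * 4,
        "feature_demo": np.arange(8, dtype=float),
        PREDICTION_NAME: np.linspace(0.0, 1.0, 8),
    })
    return neutralize(demo, columns=[PREDICTION_NAME],
                      extra_neutralizers=["feature_demo"], proportion=1.0)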
# to neutralize any series by any other series
def neutralize_series(series, by, proportion=1.0):
scores = series.values.reshape(-1, 1)
exposures = by.values.reshape(-1, 1)
# this line makes series neutral to a constant column so that it's centered and for sure gets corr 0 with exposures
exposures = np.hstack(
(exposures,
np.array([np.mean(series)] * len(exposures)).reshape(-1, 1)))
correction = proportion * (exposures.dot(
np.linalg.lstsq(exposures, scores, rcond=None)[0]))
corrected_scores = scores - correction
neutralized = pd.Series(corrected_scores.ravel(), index=series.index)
return neutralized
def unif(df):
x = (df.rank(method="first") - 0.5) / len(df)
    return pd.Series(x, index=df.index)
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 19 20:35:51 2017
@author: Steff
"""
import pandas as pd
import json as j
def extract_companies(indices = []):
data = pd.read_csv("../../data/raw/movies_metadata.csv", index_col=5)
if (len(indices) > 0):
# keep only necessary rows
data = data.loc[indices]
comp = data["production_companies"]
comp = comp.str.replace(" "," ")
comp = comp.str.replace(" , ",", ")
comp = comp.str.replace(': \"',": \'")
comp = comp.str.replace(', \"',", \'")
comp = comp.str.replace('",',"',")
comp = comp.str.replace("\"","'")
comp = comp.str.replace("'name': '","\"name\": \"")
comp = comp.str.replace("', 'id':","\", \"id\":")
comp = comp.str.replace("'id':","\"id\":")
comp = comp.str.replace("\\","")
comp = comp.str.replace("xa0","")
#comp = comp.str.replace("'name':","\"name\":").replace("'id':","\"name\":")
def parseCompany(m_id, c_id, c_name):
#print(m_id,c_id,c_name)
data_company_to_movie.append({
"company": c_id,
"movie": m_id
})
data_companies[c_id] = c_name
if (c_id == 0):
print(m_id,c_name)
data_company_to_movie = []
data_companies = {}
i = 0
for index, row in comp.iteritems():
i += 1
#print(row)
try:
jrow = j.loads(row)
for c in jrow:
parseCompany(index,c["id"],c["name"])
except ValueError:
print("\n----------------\n",index,row,"\n----------------\n")
except TypeError:
print("\n----------------\n",index,row,"\n----------------\n")
#if (i>3):
# break
    c2m_df = pd.DataFrame(data_company_to_movie)
# -*- coding: utf-8 -*-
"""OREGON Arrest Analysis (anna).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kchDTfhL69aAuNMBJmlfIrSrP8j7bBrJ
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab
from math import pi
from google.colab import files
uploaded = files.upload()
data_path = "oregonfinalversion1.csv"
arrests = pd.read_csv(data_path, index_col=0)
#arrests.loc[' STATEWIDE TOTAL']
arrests.head()
race = ['White', 'Black', 'Other']
totalArrests = [83117, 3307, 10470]
fig, ax=plt.subplots(figsize = (5,5)) ##creating bar chart
plt.bar(race, totalArrests)
plt.xlabel('Race')
plt.ylabel('Total Arrests')
plt.title('Total Arrests in Oregon in 2019 by Race')
for i, data in enumerate(totalArrests): ##adding labels to bars
plt.text(x=i, y=data+1, s=f"{data}", ha='center')
plt.show()
r = ['White', 'Black', 'Other']
rawData = {'greenBars':[83117, 3307,10470], 'blueBars':[3163765,92680,956292]}
df = pd.DataFrame(rawData)
totals = [i+j for i,j in zip(df['greenBars'], df['blueBars'])]
greenBars = [i / j * 100 for i,j in zip(df['greenBars'], totals)]
blueBars = [i / j * 100 for i,j in zip(df['blueBars'], totals)]
fig, ax=plt.subplots(figsize = (5,10))
barWidth = 0.85
names = race
plt.bar(r, greenBars, color='#7F8DA8', edgecolor='white', width=barWidth)
plt.bar(r, blueBars, bottom=greenBars, color='#FADFC3', edgecolor='white', width=barWidth)
plt.xticks(r, names)
plt.xlabel("Race")
plt.ylabel("Percentage")
plt.ylim(0,30)
plt.title('Arrests as % of Population in Oregon')
plt.show()
r2 = ['White', 'Black','Other']
rawData = {'greenBars':[10979,1335,2522], 'blueBars':[83117,3307,10470]}
df = pd.DataFrame(rawData)
import re
import loompy
import pandas as pd
from loom_table.loom_utils import csv_to_loom
def is_ion_formula(string: str) -> bool:
# At least two chemical elements with number or no number, optional positive or negative adduct,
# optional numbered suffix (to match also annotation_id)
return (
re.match(r"^(?:[A-Z][a-z]?\d*){2,}(?:[+-][A-Z][a-z]?)?(_\d+)?$", string)
is not None
)
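# Illustrative sanity checks added for clarity; the formulas are generic examples,
# not strings taken from the annotation data.
assert is_ion_formula("C6H12O6")          # two or more elements
assert is_ion_formula("C6H12O6+Na")       # with a positive adduct
assert is_ion_formula("C6H12O6-H_12")     # adduct plus a numbered annotation suffix
assert not is_ion_formula("H2O!")         # stray characters are rejected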
point_cols = ["center_y", "center_x"]
ion_intensity_cols = [
c for c in pd.read_csv("../spatiomolecular_matrix.csv").columns if is_ion_formula(c)
]
csv_to_loom(
csv_path="../spatiomolecular_matrix.csv",
dense_columns=point_cols,
out_path="spatiomolecular_matrix.zarr.loom",
)
with loompy.connect("spatiomolecular_matrix.zarr.loom") as ds:
ds.row_attrs["area"] # int64 column
# Use scan to iterate over views of "chunks"
# or create a view of the whole dataset:
view = ds.view[:, :]
data = view[:]
row_attributes = view.ra # AttributeManager
# Each row attribute is a numpy array
# Storage is a dict of attribute names (str) and values (np.array)
    row_attributes_df = pd.DataFrame(view.ra.storage)
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
        expected = SparseArray([0, np.nan, 4], fill_value=0)
import os
import sys
import argparse
import pandas as pd
import numpy as np
### Version 3, created 12 August 2020 by <NAME> ###
### Reformats concatenated, headerless MELT vcf files, into the relevant information columns, with extraneous information/columns removed, ready to use in the duplicate-removal scripts
### This includes renaming MELT SPLIT hits to match the original TE names from RepeatMasker and the original TE library
def get_args():
#What this script does
parser = argparse.ArgumentParser(description="General removal of most MELT duplicate calls and overlapping calls", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
required = parser.add_argument_group('required arguments')
#Give input coordinate file of formatted MELT SPLIT hits
parser.add_argument('-s', '--split', help='filtered, unique, concatenated MELT SPLIT file', required=True)
#Argument of directory containing the formatted MELT DELETION hits
parser.add_argument('-del', '--deletion', help='filtered, unique, concatenated MELT DELETION file', required=True)
#Argument of directory containing the list of formatted MELT hits (need full path)
parser.add_argument('-d', '--directory', type=str, help='Path to the directory of the input file', default=".")
#Argument of the output directory (need full path)
parser.add_argument('-od', '--outdir', type=str, help='Location of directory for the output file', default=".")
#Argument of the output directory (need full path)
parser.add_argument('-mt', '--melttes', type=str, help='Path to list of MELT ZIP basenames (MELT compatible TE names = zip_te_names.txt)', required=True)
#Argument of the output directory (need full path)
parser.add_argument('-lib', '--telibrary', type=str, help='Path to the list of RepeatMasker TE names (te_list.txt)', required=True)
#Argument of the output directory (need full path)
parser.add_argument('-tecat', '--tecategories', type=str, help='Path to the list of RepeatMasker TE names, with TE family category (te_lib_categories.txt)', required=True)
args = parser.parse_args()
SPLIT = args.split
DELETION = args.deletion
DIR = args.directory
OUTDIR = args.outdir
SPLIT_TE_NAMES = args.melttes
TE_NAMES = args.telibrary
CAT_TE_NAMES = args.tecategories
return SPLIT, DELETION, DIR, OUTDIR, SPLIT_TE_NAMES, TE_NAMES, CAT_TE_NAMES
SPLIT, DELETION, DIR, OUTDIR, SPLIT_TE_NAMES, TE_NAMES, CAT_TE_NAMES = get_args()
if DIR == ".":
DIR = os.getcwd()
if OUTDIR == ".":
OUTDIR = os.getcwd()
BASENAME = os.path.basename(SPLIT).split("_SPLIT")[0]
SPLIT_HITS = os.path.join(DIR, SPLIT)
DEL_HITS = os.path.join(DIR, DELETION)
OUTBASE = os.path.join(OUTDIR, BASENAME)
OUTPUT1 = OUTBASE + "_cat_assess_dups_headers.bed"
OUTPUT2 = OUTBASE + "_cat_assess_dups.bed"
HEADERS1 = ['#CHROM', 'POS', 'END', 'ASSESS', 'SVTYPE', 'SVLENGTH', 'ORIENTATION', 'Austroriparius', 'Brandtii', 'Ciliolabrum', 'Davidii', 'Myotis', 'Occultus', 'Sept_TTU', 'Thysanodes', 'Velifer', 'Vivesi', 'Yumanensis', 'MODULE']
HEADERS2 = ['#CHROM', 'POS', 'END', 'SVTYPE', 'SVLENGTH', 'ORIENTATION', 'Austroriparius', 'Brandtii', 'Ciliolabrum', 'Davidii', 'Myotis', 'Occultus', 'Sept_TTU', 'Thysanodes', 'Velifer', 'Vivesi', 'Yumanensis', 'MODULE']
SPLIT_DF = pd.read_csv(SPLIT_HITS, sep='\t', names=HEADERS1)
DEL_DF = pd.read_csv(DEL_HITS, sep='\t', names=HEADERS2)
#!/usr/bin/python
import pandas as pd
from scipy.signal import savgol_filter
import json
import time
import darts
from darts import TimeSeries
from darts.models import RNNModel
from sktime.performance_metrics.forecasting import mean_absolute_percentage_error
import dysts
from dysts.flows import *
from dysts.base import *
from dysts.utils import *
from dysts.analysis import *
hyperparams = {
"input_chunk_length": 50,
"output_chunk_length": 1,
"model": "LSTM",
"n_rnn_layers": 2,
"random_state": 0
}
results = dict()
# cwd = os.getcwd()
cwd = os.path.dirname(os.path.realpath(__file__))
output_path = cwd + "/results/importance_sampling.json"
print("Saving data to: ", output_path)
full_epoch_count = 400
forecast_length = 200
transient_length = 2
n_iters = 5
epoch_count = 30
n_ic = 10 # model retraining is not currently working in darts
traj_len = 150
show_progress = False
print(f"{n_ic} points sampled per iteration, with trajectory length {traj_len}, for a total of {n_iters} iterations of length {epoch_count}")
print(n_ic * traj_len * n_iters * epoch_count)
print(1000 * full_epoch_count) # 1000 timepoints in an epoch
for equation_ind, equation_name in enumerate(get_attractor_list()):
np.random.seed(0)
print(f"{equation_name} {equation_ind}", flush=True)
results[equation_name] = dict()
equation = getattr(dysts.flows, equation_name)()
if hasattr(equation, "delay"):
if equation.delay:
continue
sol = equation.make_trajectory(1200, resample=True)
y_train, y_test = sol[:-forecast_length, 0], sol[-forecast_length:, 0]
y_train_ts = TimeSeries.from_dataframe(pd.DataFrame(y_train))
results[equation_name]["true_value"] = y_test.tolist()
## Baseline forecast
try:
del model
except:
pass
model = RNNModel(**hyperparams)
base_t0 = time.perf_counter()
model.fit(y_train_ts, epochs=full_epoch_count)
base_t1 = time.perf_counter()
y_val_pred = model.predict(forecast_length)
y_val_pred = np.squeeze(y_val_pred.values())
score = mean_absolute_percentage_error(y_test, y_val_pred, symmetric=True)
base_elapsed = base_t1 - base_t0
print(score, base_elapsed)
# plt.figure()
# plt.plot(y_test)
# plt.plot(y_val_pred)
results[equation_name]["base_value"] = y_val_pred.tolist()
results[equation_name]["base_time"] = base_elapsed
results[equation_name]["base_score"] = score
pred_backtest = model.historical_forecasts(y_train_ts, retrain=False, start=(1 + model.input_chunk_length)).values()
# Importance sampling experiment
model = RNNModel(**hyperparams)
comp_t0 = time.perf_counter()
for i in range(n_iters):
print(i)
if i == 0:
ic_indices = np.random.choice(np.arange(len(y_train_ts) - (forecast_length + transient_length)), n_ic)
else:
pred_backtest = model.historical_forecasts(y_train_ts, retrain=False, start=(1 + model.input_chunk_length)).values()
y_train_backtest = y_train_ts.values()[(1 + model.input_chunk_length):]
mse_back = np.squeeze((pred_backtest - y_train_backtest)**2)
mse_back = savgol_filter(mse_back, 51, 3)
mse_back[mse_back<0] = 0.0
sample_probs = mse_back**4
sample_probs /= np.sum(sample_probs)
ic_indices = np.random.choice(np.arange(len(y_train_backtest)), n_ic, p=sample_probs, replace=True)
## Random sampling as a control condition
# ic_indices = np.random.choice(np.arange(len(y_train_backtest)), n_ic)
# ic_indices = np.argsort(mse_back)[-n_ic:]
ic_vals = sol[ic_indices] + 1e-2 * (np.random.random(sol[ic_indices].shape) - 0.5)
equation.ic = ic_vals
new_sol = equation.make_trajectory(traj_len + transient_length, resample=True)[:, transient_length:, :]
y_train_list = list(new_sol[..., 0])
y_train_list = [TimeSeries.from_dataframe(pd.DataFrame(item)) for item in y_train_list]
model.fit(y_train_list, epochs=epoch_count)
comp_t1 = time.perf_counter()
comp_elapsed = comp_t1 - comp_t0
y_val_pred = model.predict(forecast_length, series=y_train_ts)
y_val_pred = np.squeeze(y_val_pred.values())
score = mean_absolute_percentage_error(y_test, y_val_pred, symmetric=True)
print(score, comp_elapsed)
# plt.plot(y_val_pred)
# plt.show()
results[equation_name]["importance_values"] = y_val_pred.tolist()
results[equation_name]["importance_time"] = comp_elapsed
results[equation_name]["importance_score"] = score
with open(output_path, 'w') as f:
json.dump(results, f, indent=4)
## Random timepoint baseline
for equation_ind, equation_name in enumerate(get_attractor_list()):
np.random.seed(0)
print(f"{equation_name} {equation_ind}", flush=True)
equation = getattr(dysts.flows, equation_name)()
if hasattr(equation, "delay"):
if equation.delay:
continue
sol = equation.make_trajectory(1200, resample=True)
y_train, y_test = sol[:-forecast_length, 0], sol[-forecast_length:, 0]
    y_train_ts = TimeSeries.from_dataframe(pd.DataFrame(y_train))
# Process 2017 bitcoin data for Informer
import pandas as pd
import os
from sklearn.decomposition import PCA
import datetime
import math
import pandas as pd
import numpy as np
import torch
BETTI_NUMBER_0_PATH = "/content/drive/MyDrive/bitcoin/CoinWorks/data/betti_0(100).csv"
BETTI_NUMBER_1_PATH = "/content/drive/MyDrive/bitcoin/CoinWorks/data/betti_1(100).csv"
AMOMAT_DIR = "/content/drive/MyDrive/bitcoin/CoinWorks/data/amo/"
OCCMAT_DIR = "/content/drive/MyDrive/bitcoin/CoinWorks/data/occ/"
PRICE_PATH = "/content/drive/MyDrive/bitcoin/CoinWorks/data/pricedBitcoin2009-2018.csv"
PROCESSED_DIR = "/content/drive/MyDrive/aliyun/processed_data/2017/"
#TOTALTX_DIR = "/content/drive/MyDrive/aliyun/bitcoin_totaltx_2018_2020.csv"
#PERIOD = [2018, 2019, 2020]
def getBetweenDay(begin_date, end_date):
date_list = []
date_arr = []
date_unix_list = []
begin_date = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
print("begin_date:",begin_date)
# end_date = datetime.datetime.strptime(time.strftime('%Y-%m-%d', time.localtime(time.time())), "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
print("end_date:",end_date)
while begin_date <= end_date:
date_unix = math.trunc(begin_date.replace(tzinfo=datetime.timezone.utc).timestamp()*1000)
date_unix_list.append(date_unix)
date_str = begin_date.strftime("%Y-%m-%d")
date_list.append(date_str)
date_arr.append([date_str, date_unix])
begin_date += datetime.timedelta(days=1)
return np.asarray(date_arr)
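def _example_get_between_day():
    # Illustrative example added for clarity (dates are arbitrary): each day in the
    # inclusive range becomes one ["YYYY-MM-DD", unix_ms] row, so this returns (3, 2).
    return getBetweenDay("2017-01-01", "2017-01-03").shape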
def combine_features_with_data(dataset_model):
data_price = pd.read_csv(PRICE_PATH)[-365:][["totaltx","price"]].reset_index(drop=True)
#btc_price_2018_2020 = data_price.Open.str.replace(",","")
#total_tx = pd.read_csv(TOTALTX_DIR, index_col=0)
date_arr = pd.DataFrame(getBetweenDay("2017-01-01", "2017-12-31"))[0]
btc_2017 = pd.concat([data_price, date_arr], axis = 1)
btc_2017.columns = ["totaltx", "price", "date"]
data_feature = pd.DataFrame([])
if dataset_model == "betti":
#for YEAR in PERIOD:
#for file_name in os.listdir(BETTI_NUMBER_DIR):
feature_betti_0 = pd.read_csv(BETTI_NUMBER_0_PATH, index_col=0).loc[:, "V1":"V50"]
feature_betti_1 = pd.read_csv(BETTI_NUMBER_1_PATH, index_col=0).loc[:, "V1":"V50"]
feature_betti_number = pd.concat([feature_betti_0,feature_betti_1], axis = 1)
data_feature = pd.concat([data_feature,feature_betti_number]).reset_index(drop=True)
#data_feature.to_csv("data_feature.csv")
print("data_feature:",data_feature)
elif dataset_model == "betti_der":
feature_betti_0 = pd.read_csv(BETTI_NUMBER_0_PATH, index_col=0).loc[:, "V1":"V50"]
        feature_betti_1 = pd.read_csv(BETTI_NUMBER_1_PATH, index_col=0)
#!/usr/bin/env python2
#coding=utf-8
# Copyright 2020 <NAME> and <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#coding=utf-8
from __future__ import unicode_literals
from __future__ import print_function
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import argparse
import functools
import sqlite3
import logging
import json
import manatee
manatee.setEncoding('UTF-8')
import numpy as np
import pandas as pd
import joblib
import datetime
import os.path
# from treeinterpreter import treeinterpreter as ti
log = logging.getLogger("train")
pstructure = "p"
gstructure = "g"
def text(corpus, docid, attrname='word'):
doc = corpus.get_struct('doc')
word = corpus.get_attr(attrname)
beg = doc.beg(docid)
end = doc.end(docid)
p = corpus.get_struct(pstructure)
g = corpus.get_struct(gstructure)
grng = g.whole()
prng = p.whole()
it = word.textat(beg)
paragraphs = []
tokens = None
for i in range(beg, end):
if prng.find_beg(i) == i:
if tokens is not None:
paragraphs.append(tokens)
tokens = []
if grng.find_beg(i) != i and tokens:
tokens.append(" ")
tokens.append(it.next())
if tokens:
paragraphs.append(tokens)
out = ''
for p in paragraphs:
for t in p:
out += t
out += ' '
return out.strip()
def gen_text(corpus, attrname):
doc = corpus.get_struct('doc')
attr = corpus.get_attr(attrname)
for i in xrange(doc.size()):
beg, end = doc.beg(i), doc.end(i)
it = attr.textat(beg)
yield [it.next() for pos in xrange(beg, end)]
def read_annots(corpus, db):
annots = pd.read_sql("select file, name, value from document_annotations", db)
annots = annots[annots.name.isin(simple_attributes)]
doc = corpus.get_struct("doc")
sa = doc.get_attr("filename")
lex = {sa.pos2str(i): i for i in xrange(doc.size())}
annots['docid'] = annots.file.map(lambda docname: lex.get(docname, -1))
return annots[annots.docid != -1].drop(columns="file")
default_annot_values = {
    'zanr': ['zpravodajství', 'rozhovor', 'komentář'],
    'tema': ['migrační krize', 'domácí politika',
             'zahraniční politika / diplomacie',
             'společnost / společenská situace', 'jiné', 'energetika',
             'sociální politika', 'konflikt na Ukrajině', 'kultura',
             'konflikt v Sýrii', 'zbrojní politika', 'ekonomika / finance',
             'konspirace'],
    'zamereni': ['zahraniční', 'domácí', 'obojí', 'nelze určit'],
    'lokace': ['EU', 'Česká republika', 'USA', 'jiná země',
               'jiné / nelze určit', 'Rusko', 'NATO', 'Rusko + USA'],
    'argumentace': ['ne', 'ano'],
    'emoce': ['missing', 'rozhořčení', 'soucit', 'strach', 'nenávist', 'jiná'],
    'vyzneni_celku': ['neutrální', 'negativní', 'pozitivní'],
    'rusko': ['missing', 'pozitivní příklad', 'neutrální', 'oběť',
              'negativní příklad', 'hrdina'],
    'vyzneni1': ['neutrální', 'negativní', 'missing', 'pozitivní', 'velebící',
                 'nenávistné'],
    'vyzneni2': ['neutrální', 'negativní', 'missing', 'pozitivní', 'velebící',
                 'nenávistné'],
    'vyzneni3': ['neutrální', 'negativní', 'missing', 'pozitivní', 'velebící',
                 'nenávistné'],
'obrazek': ['ne', 'ano'],
'video': ['ne', 'ano'],
'nazor': ['ne', 'ano'],
'odbornik': ['ne', 'ano'],
'zdroj': ['ne', 'ano'],
'strach': ['ne', 'ano'],
'vina': ['ne', 'ano'],
'nalepkovani': ['ne', 'ano'],
'demonizace': ['ne', 'ano'],
'relativizace': ['ne', 'ano'],
'fabulace': ['ne', 'ano'],
'year': '2016,2017,2018'.split(',')
}
simple_attributes = ['vina', 'nalepkovani', 'argumentace', 'emoce',
'demonizace', 'relativizace', 'strach', 'fabulace', 'nazor', 'lokace',
'zdroj', 'rusko', 'odbornik', 'tema', 'zanr', 'zamereni',
'vyzneni_celku',
]
binary_attributes = [x for x in default_annot_values\
if len(default_annot_values[x]) == 2]
def main():
fmt = '[%(asctime)-15s] %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=fmt)
m = argparse.ArgumentDefaultsHelpFormatter
p = argparse.ArgumentParser(description="", formatter_class=m)
p.add_argument("-c", "--corpus", type=str, required=True)
p.add_argument("-d", "--db", type=str, required=True)
p.add_argument("-o", "--outfile", type=str, required=True)
args = p.parse_args()
log.info("opening database {}".format(args.db))
db = sqlite3.connect(args.db)
db.isolation_level = None # I want to handle transactions myself
log.info("opening corpus {}".format(args.corpus))
corp = manatee.Corpus(args.corpus)
log.info("corpus has %d positions" % corp.size())
log.info("reading annotations")
attrs = read_annots(corp, db)
headers_simple = []
headers_multi = []
for k, v in default_annot_values.iteritems():
if k not in simple_attributes:
continue
if len(v) > 2:
for vv in v:
headers_multi.append((k, vv, (k + "_" + vv).replace(' ', '-')))
else:
headers_simple.append(k)
log.info("reading corpus text")
doc = corp.get_struct('doc')
docsize = corp.get_struct('doc').size()
fn = doc.get_attr('filename')
with open('labels.csv', 'w') as lf:
for x in headers_simple: print(x, file=lf)
for x,y,z in headers_multi: print(z.encode('utf-8'), file=lf)
print("Grouping most common answer")
    most_common = attrs.groupby(["docid", "name"]).agg(lambda x: pd.Series.mode(x))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
cardData = pd.read_csv('CardData.csv', header=0, encoding='utf-8-sig')
coinData = pd.read_csv('CoinData.csv', header=0, encoding='utf-8-sig')
from sklearn.preprocessing import StandardScaler as SS
import pandas as pd
import numpy as np
class data_scaler(object):
"""
A class for scaling and unscaling data.
"""
def __init__(self, mva=30, data=None):
self.mva = mva
self.scaler = SS()
self.orig_data = data
self.datasets = {}
self.mvas = {}
self.datasetcount = 0
if data is not None:
self.datasets['orig'] = data
    def set_mva(self, mva=30):
"""
Allows the moving average period to be set. Must be an integer.
"""
self.mva = mva
def transform_data(self, data=None):
"""
This is for scaling the original data for use in machine learning.
Takes a numpy array as input, divides by a moving average with period mva (integer),
and returns the scaled data as well as the scaler
and moving average (needed for returning the data to its original form).
"""
        if data is None and self.orig_data is None:
            print('error! you need to supply the data here or when instantiating the class')
            return None
        elif data is None and self.orig_data is not None:
            data = self.orig_data
self.datasets['orig'] = data
# take the moving average with period self.mva
        rolling_mean = pd.Series(data)
#!python
# builtin
import os
import sys
import logging
import json
import time
import contextlib
import multiprocessing
import urllib
import csv
# external
import numpy as np
import pandas as pd
import h5py
import pyteomics.mgf
# local
from ion_networks._version import __version__ as VERSION
from ion_networks import numba_functions
GITHUB_VERSION_FILE = "https://raw.githubusercontent.com/swillems/ion_networks/master/ion_networks/_version.py"
BASE_PATH = os.path.dirname(__file__)
UPDATE_COMMAND = os.path.join(os.path.dirname(BASE_PATH), "install", "update.sh")
LIB_PATH = os.path.join(BASE_PATH, "lib")
DEFAULT_PARAMETER_PATH = os.path.join(LIB_PATH, "default_parameters")
DEFAULT_PARAMETER_FILES = {
"convert": "convert_parameters.json",
"create": "create_parameters.json",
"evidence": "evidence_parameters.json",
"interface": "interface_parameters.json",
"database": "database_parameters.json",
"annotation": "annotation_parameters.json",
"mgf": "mgf_parameters.json",
}
DATA_TYPE_FILE_EXTENSIONS = {
"DDA": ".mgf",
"SONAR": "_Apex3DIons.csv",
"HDMSE": "_Apex3DIons.csv",
"SWIMDIA": "_Apex3DIons.csv",
"DIAPASEF": "_centroids.hdf",
}
LOGGER = logging.getLogger("Ion-networks")
MAX_THREADS = 1
@contextlib.contextmanager
def open_logger(log_file_name, log_level=logging.INFO):
# TODO: Docstring
start_time = time.time()
formatter = logging.Formatter('%(asctime)s > %(message)s')
LOGGER.setLevel(log_level)
if not LOGGER.hasHandlers():
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(log_level)
console_handler.setFormatter(formatter)
LOGGER.addHandler(console_handler)
if log_file_name is not None:
if log_file_name == "":
log_file_name = BASE_PATH
else:
log_file_name = os.path.abspath(log_file_name)
if os.path.isdir(log_file_name):
log_file_name = os.path.join(log_file_name, "log.txt")
directory = os.path.dirname(log_file_name)
if not os.path.exists(directory):
os.makedirs(directory)
file_handler = logging.FileHandler(log_file_name, mode="a")
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.info("=" * 50)
LOGGER.info(f"COMMAND: ion_networks {' '.join(sys.argv[1:])}")
LOGGER.info(f"VERSION: {VERSION}")
LOGGER.info(f"LOGFILE: {log_file_name}")
LOGGER.info("")
try:
yield LOGGER
LOGGER.info("")
LOGGER.info("Successfully finished execution")
except:
LOGGER.info("")
LOGGER.exception("Something went wrong, execution incomplete!")
finally:
LOGGER.info(f"Time taken: {time.time() - start_time}")
LOGGER.info("=" * 50)
if log_file_name is not None:
LOGGER.removeHandler(file_handler)
def read_parameters_from_json_file(file_name="", default=""):
"""
Read a custom or default parameter file.
Parameters
----------
default : str
The default parameters that should be loaded. Options are:
"create"
"evidence"
"interface"
""
file_name : str
The name of a .json file that contains parameters defined by the user.
These will override the default parameters.
Returns
-------
dict
A dictionary with parameters.
"""
if default == "":
parameters = {"log_file_name": ""}
else:
default_parameter_file_name = os.path.join(
DEFAULT_PARAMETER_PATH,
DEFAULT_PARAMETER_FILES[default]
)
with open(default_parameter_file_name, "r") as in_file:
parameters = json.load(in_file)
if file_name != "":
with open(file_name, "r") as in_file:
user_defined_parameters = json.load(in_file)
parameters.update(user_defined_parameters)
# TODO: Numba expects proper floats or integers, not a mixture
# TODO: e.g. DT_error = 2.0, instead of DT_error = 2
if "threads" in parameters:
set_threads(parameters["threads"])
return parameters
def set_threads(threads):
global MAX_THREADS
max_cpu_count = multiprocessing.cpu_count()
if threads > max_cpu_count:
MAX_THREADS = max_cpu_count
else:
while threads <= 0:
threads += max_cpu_count
MAX_THREADS = threads
def get_file_names_with_extension(input_path, extension=""):
"""
Get all file names with a specific extension from a list of files and
folders.
Parameters
----------
input_path : iterable[str]
An iterable with files or folders from which all files with a specific
extension need to be selected.
extension : str
The extension of the files of interest.
Returns
-------
list
A sorted list with unique file names with the specific extension.
"""
input_files = set()
if not isinstance(extension, str):
for tmp_extension in extension:
for file_name in get_file_names_with_extension(
input_path,
tmp_extension
):
input_files.add(file_name)
else:
for current_path in input_path:
if os.path.isfile(current_path):
if current_path.endswith(extension):
input_files.add(current_path)
elif os.path.isdir(current_path):
for current_file_name in os.listdir(current_path):
if current_file_name.endswith(extension):
file_name = os.path.join(
current_path,
current_file_name
)
input_files.add(file_name)
return sorted([os.path.abspath(file_name) for file_name in input_files])
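def _example_get_file_names(folder="./example_data"):
    # Illustrative sketch added for clarity; "./example_data" is a placeholder path.
    # Collects every .mgf and .csv file directly inside the given folder and
    # returns their sorted absolute paths.
    return get_file_names_with_extension([folder], extension=(".mgf", ".csv"))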
def read_data_from_file(
data_type,
file_name,
log_transform_intensity=True,
):
"""
Convert an [input_file.*] file to a pd.DataFrame with as columns the
dimensions associated with the data type.
Parameters
----------
data_type : str
The data type of the [input_file.*] file. Options are:
'DDA'
'SONAR'
'HDMSE'
'SWIMDIA'
'DIAPASEF'
file_name : str
The file name containing centroided ions.
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
if data_type == "DDA":
read_function = read_data_from_mgf_file
elif data_type == "SONAR":
read_function = read_data_from_sonar_file
elif data_type == "HDMSE":
read_function = read_data_from_hdmse_file
elif data_type == "SWIMDIA":
read_function = read_data_from_swimdia_file
elif data_type == "DIAPASEF":
read_function = read_data_from_diapasef_file
data = read_function(
file_name,
log_transform_intensity=log_transform_intensity,
)
return data
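def _example_read_data(file_name="example_run.mgf"):
    # Illustrative sketch added for clarity; "example_run.mgf" is a placeholder name.
    # For DDA data this dispatches to read_data_from_mgf_file and returns a DataFrame
    # with PRECURSOR_RT, PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT columns.
    return read_data_from_file("DDA", file_name)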
def read_data_from_mgf_file(
file_name,
log_transform_intensity=True,
):
"""
Convert an [mgf_input.mgf] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the DDA .mgf file (generated with ms-convert).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading mgf file {file_name}")
mz1s = []
mz2s = []
rts = []
ints = []
for spectrum in pyteomics.mgf.read(file_name):
peak_count = len(spectrum["intensity array"])
ints.append(spectrum["intensity array"])
mz2s.append(spectrum["m/z array"])
rts.append(
np.repeat(spectrum["params"]["rtinseconds"] / 60, peak_count)
)
mz1s.append(np.repeat(spectrum["params"]["pepmass"][0], peak_count))
mz1s = np.concatenate(mz1s)
mz2s = np.concatenate(mz2s)
rts = np.concatenate(rts)
ints = np.concatenate(ints)
if log_transform_intensity:
ints = np.log2(ints)
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_MZ"
]
data = np.stack([mz2s, rts, ints, mz1s]).T
return pd.DataFrame(data, columns=dimensions)
def read_data_from_sonar_file(
file_name,
log_transform_intensity=True,
):
"""
Convert a [sonar_input.csv] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the SONAR .csv file (generated with Waters' Apex3d).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading sonar file {file_name}")
data = pd.read_csv(
file_name,
engine="c",
dtype=np.float,
usecols=["Function", "m_z", "rt", "mobility", "area"]
).values
data = data[np.searchsorted(data[:, 0], 2):, 1:]
if log_transform_intensity:
data[:, 2] = np.log2(data[:, 2])
data[:, 3] = 400 + data[:, 3] * (900 - 400) / 200
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_MZ"
]
return pd.DataFrame(data, columns=dimensions)
def read_data_from_hdmse_file(
file_name,
log_transform_intensity=True,
):
"""
Convert a [hdmse_input.csv] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_DT, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the HDMSE .csv file (generated with Waters' Apex3d).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_DT,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading hdmse file {file_name}")
data = pd.read_csv(
file_name,
engine="c",
dtype=np.float,
usecols=["Function", "m_z", "rt", "mobility", "area"]
).values
data = data[np.searchsorted(data[:, 0], 2):, 1:]
if log_transform_intensity:
data[:, 2] = np.log2(data[:, 2])
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_DT"
]
    return pd.DataFrame(data, columns=dimensions)
from PyQt5 import QtCore
import pandas as pd
class Node(QtCore.QObject):
def __init__(self, name, dataframe=None, parent=None, metadata=None, delayed_params=None):
if type(name)!=str or len(name)==0 or '/' in name:
raise NamingException("Name must be a non-empty string with no '/' character")
super().__init__(parent=parent)
self.children = dict()
self.name = name
self.dataframe = dataframe
self.metadata = metadata
self.delayed_params = delayed_params
def set_data(self, dataframe):
self.dataframe = dataframe
def append_data(self, dataframe, **kwargs):
self.dataframe = self.dataframe.append(dataframe, **kwargs)
def get_data(self, keep_alive=True):
if self.dataframe is None and not self.delayed_params is None:
file_path, node_path = self.delayed_params
with pd.HDFStore(file_path, mode='r') as store:
df = store.get(node_path)
if keep_alive:
self.dataframe = df
return df
else:
return self.dataframe
def has_data(self):
return not self.dataframe is None
def add_node(self, name=None, node=None, overwrite=False, **kwargs):
if name is None and node is None:
raise Exception('Must specify either <name> or <node>')
if not name is None and not node is None:
raise Exception("Can't specify both <name> and <node>")
if node is None: node = Node(name, parent=self, **kwargs)
if type(node) != Node:
raise TypeError
if self.has_child(node.name) and not overwrite:
raise NodeExistsException
self.children[node.name] = node
return node
def has_child(self, name):
return name in self.children.keys()
def get_child(self, name):
if name in self.children:
return self.children[name]
else:
raise NodeNotFoundException
def add_meta(self, **kwargs):
if self.metadata is None:
self.metadata = kwargs
else:
self.metadata.update(kwargs)
def get_meta(self, keep_alive=True):
if self.metadata is None and not self.delayed_params is None:
file_path, node_path = self.delayed_params
with pd.HDFStore(file_path, mode='r') as store:
meta = store.get_storer(node_path).attrs.metadata
if keep_alive:
self.metadata = meta
else:
meta = self.metadata
if meta is None:
return {}
else:
return meta
def get_tree_str(self, prefix = '', previous_string=''):
s = previous_string + prefix + '+ ' + self.name +'\n'
prefix += ' '
for name in self.get_child_names():
s += self.children[name].get_tree_str(prefix=prefix)
return s
def get_child_names(self, sort=True):
child_names = self.children.keys()
if sort:
return sorted(child_names, key=str.lower)
else:
return child_names
def __str__(self):
return self.get_tree_str()
def __iter__(self):
for name in self.children:
yield self.children[name]
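def _example_node_tree():
    # Illustrative sketch added for clarity (names and data are made up): builds a
    # small tree, attaches a DataFrame to one child and returns the printable tree.
    root = Node("root")
    child = root.add_node(name="experiment_1")
    child.set_data(pd.DataFrame({"x": [1, 2, 3]}))
    return root.get_tree_str()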
class Repository(QtCore.QObject):
_VERSION = '1.0'
def __init__(self, file_path, parent=None, delayed=True):
super().__init__(parent=parent)
self.root = Node('root')
self.root.set_data(pd.DataFrame())
self.root.add_meta(repository_version=self._VERSION)
self.file_path = file_path
self.load(delayed=delayed)
return
def load(self, delayed=True):
try:
with pd.HDFStore(self.file_path, mode='r') as store:
nodes_list = store.keys()
try:
nodes_list.pop(nodes_list.index('/__root'))
self.root.set_data(store.get('/__root'))
meta = store.get_storer('/__root').attrs.metadata
self.root.add_meta(**meta)
except:
print('Could not load data_index')
try:
version = self.root.get_meta()['repository_version']
if version != self._VERSION:
raise Exception
except:
print('File repository version does not match the current verison of the code... You may have to open manually')
nodes_list.sort(key=len)
for path in nodes_list:
if delayed:
self.add_node(path=path, overwrite=True, delayed_params=[self.file_path, path])
else:
df = store.get(path)
try:
meta = store.get_storer(path).attrs.metadata
except:
meta = dict()
self.add_node(path=path, dataframe=df, metadata=meta, overwrite=True)
except OSError:
print("File does not exist")
pass
def save(self, overwrite=False):
        with pd.HDFStore(self.file_path, mode='a') as store:
################################################################################
### Init
################################################################################
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
import os
import argparse
import multiprocessing
import pandas as pd
from tqdm import tqdm
################################################################################
### Script
################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='merge_csv.py',
description='Merges a list of csvs into a single output csv. Adds a column indicating the original filename.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-i','--input', nargs='+', help='Required. List of input csvs.', required=True)
parser.add_argument('output_csv', help='Output csv file.')
args = parser.parse_args()
logger.info("Checking input files")
for x in args.input:
assert os.path.exists(x)
input_files = args.input
    input_pd = [pd.read_csv(x) for x in input_files]
import pandas as pd
import matplotlib.pyplot as plt
from . import get_data
class TeamStat:
def __init__(self,tid):
self.tid = tid
self.team_data = pd.DataFrame()
        self.personal_data = pd.DataFrame()
import os
import sys
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import preprocessor as p
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import wget
import dload
from pylatex import Document, Section, Subsection, Command
from pylatex.utils import italic, NoEscape
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import plot_roc_curve, plot_confusion_matrix
from datetime import date
from io import BytesIO
from io import StringIO
#from IPython import display
import base64
from wordcloud import WordCloud
import seaborn as sns
import uuid
from sklearn.metrics import auc
import requests
import io
import fileinput
# Download/create the dataset
def fetch():
print("fetching dataset!") # replace this with code to fetch the dataset
url = 'https://raw.githubusercontent.com/vijayakuruba/Data/main/gender-classifier-DFE-791531.csv'
wget.download(url)
print("Download complete!")
def clean_data(df):
tweets = []
for line in df:
# send to tweet_processor
line_cleaned = p.clean(line)
line_cleaned = line_cleaned.lower()
tweets.append(line_cleaned)
return tweets
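def _example_clean_data():
    # Illustrative example added for clarity: tweet-preprocessor strips URLs and
    # @mentions before the text is lowercased (the sample tweet is made up).
    return clean_data(["Check THIS out https://t.co/abc123 @someone"])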
def prepare_data(df):
#clean_tweets(df)
df_tweet = clean_data(df["text"])
df_tweet = pd.DataFrame(df_tweet)
df_text = clean_data(df["description"].fillna(""))
    df_text = pd.DataFrame(df_text)
import argparse
import os
import requests
import tempfile
import numpy as np
import pandas as pd
import sys
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import logging
import logging.handlers
def _get_logger():
    '''
    Use a Python logger for logging.
    # https://stackoverflow.com/questions/17745914/python-logging-module-is-printing-lines-multiple-times
    '''
loglevel = logging.DEBUG
l = logging.getLogger(__name__)
if not l.hasHandlers():
l.setLevel(loglevel)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
l.handler_set = True
return l
logger = _get_logger()
# Since we get a headerless CSV file we specify the column names here.
feature_columns_names = [
"sex",
"length",
"diameter",
"height",
"whole_weight",
"shucked_weight",
"viscera_weight",
"shell_weight",
]
label_column = "rings"
feature_columns_dtype = {
"sex": str,
"length": np.float64,
"diameter": np.float64,
"height": np.float64,
"whole_weight": np.float64,
"shucked_weight": np.float64,
"viscera_weight": np.float64,
"shell_weight": np.float64
}
label_column_dtype = {"rings": np.float64}
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
if __name__ =='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', type=str, default="/opt/ml/processing")
parser.add_argument('--dataset_file_path', type=str, default="input/untitled.csv")
parser.add_argument('--label_column', type=str, default="fraud")
# parse arguments
args = parser.parse_args()
logger.info("#############################################")
logger.info(f"args.base_dir: {args.base_dir}")
logger.info(f"args.dataset_file_path: {args.dataset_file_path}")
logger.info(f"args.label_column: {args.label_column}")
##############################################
base_dir = args.base_dir
dataset_file_path = args.dataset_file_path
label_column = args.label_column
df = pd.read_csv(
f"{base_dir}/{dataset_file_path}",
# header=None,
# names=feature_columns_names + [label_column],
# dtype=merge_two_dicts(feature_columns_dtype, label_column_dtype
# )
)
logger.info(f"dataset sample \n {df.head(2)}")
logger.info(f"df columns \n {df.columns}")
y = df.pop(label_column)
float_cols = df.select_dtypes(include=['float']).columns.values
int_cols = df.select_dtypes(include=['int']).columns.values
numeric_features = np.concatenate((float_cols, int_cols), axis=0).tolist()
# numeric_features = list(feature_columns_names)
# numeric_features.remove("sex")
numeric_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="median")),
("scaler", StandardScaler())
]
)
categorical_features = df.select_dtypes(include=['object']).columns.values.tolist()
# categorical_features = ["sex"]
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
("onehot", OneHotEncoder(handle_unknown="ignore"))
]
)
preprocess = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features)
]
)
X_pre = preprocess.fit_transform(df)
y_pre = y.to_numpy().reshape(len(y), 1)
X = np.concatenate((y_pre, X_pre), axis=1)
np.random.shuffle(X)
train, validation, test = np.split(X, [int(.7*len(X)), int(.85*len(X))])
pd.DataFrame(train).to_csv(f"{base_dir}/train/train.csv", header=False, index=False)
pd.DataFrame(validation).to_csv(f"{base_dir}/validation/validation.csv", header=False, index=False)
pd.DataFrame(test).to_csv(f"{base_dir}/test/test.csv", header=False, index=False)
logger.info(f"preprocessed train sample \n { | pd.DataFrame(train) | pandas.DataFrame |
"""New style (fast) tag count annos
Use these for new projects.
"""
from mbf_genomics.annotator import Annotator
import numpy as np
import pypipegraph as ppg
import hashlib
import pandas as pd
from pathlib import Path
from dppd import dppd
import dppd_plotnine # noqa:F401
from mbf_qualitycontrol import register_qc, QCCollectingJob, qc_disabled
from mbf_genomics.util import parse_a_or_c_to_plot_name
dp, X = dppd()
# ## Base classes and strategies - skip these if you just care about using TagCount annotators
class _CounterStrategyBase:
cores_needed = 1
def extract_lookup(self, data):
"""Adapter for count strategies that have different outputs
(e.g. one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)
"""
return data
class CounterStrategyStrandedRust(_CounterStrategyBase):
cores_needed = -1
name = "stranded"
def __init__(self):
self.disable_sanity_check = False
def count_reads(
self, interval_strategy, genome, bam_filename, bam_index_name, reverse=False
):
# bam_filename = bamfil
intervals = interval_strategy._get_interval_tuples_by_chr(genome)
gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)
from mbf_bam import count_reads_stranded
res = count_reads_stranded(
bam_filename, bam_index_name, intervals, gene_intervals
)
self.sanity_check(res, bam_filename)
return res
    def sanity_check(self, forward_and_reverse, bam_filename):
if self.disable_sanity_check:
return
error_count = 0
        forward, reverse = forward_and_reverse
for gene_stable_id, forward_count in forward.items():
reverse_count = reverse.get(gene_stable_id, 0)
if (reverse_count > 100) and (reverse_count > forward_count * 1.1):
error_count += 1
if error_count > 0.1 * len(forward):
raise ValueError(
"Found at least %.2f%% of genes to have a reverse read count (%s) "
"above 110%% of the exon read count (and at least 100 tags). "
"This indicates that this lane (%s) should have been reversed before alignment. "
"Set reverse_reads=True on your Lane object"
% (
100.0 * error_count / len(forward),
self.__class__.__name__,
bam_filename,
)
)
def extract_lookup(self, data):
"""Adapter for count strategies that have different outputs
(e.g. one-hashmap-unstranded or two-hashmaps-one-forward-one-reversed)
"""
return data[0]
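# Illustrative sketch (shapes inferred from the code above, not from upstream
# docs): the stranded counter returns a (forward, reverse) pair of
# gene_stable_id -> count mappings, and extract_lookup keeps only the forward
# one, e.g.
#   data = ({"GENE_A": 17, "GENE_B": 3}, {"GENE_A": 1, "GENE_B": 0})
#   CounterStrategyStrandedRust().extract_lookup(data)  # -> {"GENE_A": 17, "GENE_B": 3}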
class CounterStrategyUnstrandedRust(_CounterStrategyBase):
cores_needed = -1
name = "unstranded"
def count_reads(
self, interval_strategy, genome, bam_filename, bam_index_name, reverse=False
):
# bam_filename = bamfil
intervals = interval_strategy._get_interval_tuples_by_chr(genome)
gene_intervals = IntervalStrategyGene()._get_interval_tuples_by_chr(genome)
# chr -> [gene_id, strand, [start], [stops]
from mbf_bam import count_reads_unstranded
res = count_reads_unstranded(
bam_filename, bam_index_name, intervals, gene_intervals
)
return res
class _IntervalStrategy:
def get_interval_lengths_by_gene(self, genome):
by_chr = self._get_interval_tuples_by_chr(genome)
length_by_gene = {}
for chr, tups in by_chr.items():
for tup in tups: # stable_id, strand, [starts], [stops]
gene_stable_id = tup[0]
length = 0
for start, stop in zip(tup[2], tup[3]):
length += stop - start
length_by_gene[gene_stable_id] = length
return length_by_gene
def _get_interval_tuples_by_chr(self, genome): # pragma: no cover
raise NotImplementedError()
def get_deps(self):
return []
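# Expected shape of _get_interval_tuples_by_chr output, inferred from the
# subclasses below (gene ids and coordinates here are made up):
#   {"1": [("GENE_A", 1, [100, 500], [200, 800]), ...], "2": [...], ...}
# i.e. chromosome -> list of (gene_stable_id, strand, [starts], [stops]).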
class IntervalStrategyGenomicRegion(_IntervalStrategy):
"""Used internally by _FastTagCounterGR"""
def __init__(self, gr):
self.gr = gr
self.name = f"GR_{gr.name}"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
if self.gr.genome != genome: # pragma: no cover
raise ValueError("Mismatched genomes")
df = self.gr.df
if not "strand" in df.columns:
df = df.assign(strand=1)
df = df[["chr", "start", "stop", "strand"]]
if df.index.duplicated().any():
raise ValueError("index must be unique")
for tup in df.itertuples():
result[tup.chr].append((str(tup[0]), tup.strand, [tup.start], [tup.stop]))
return result
class IntervalStrategyGene(_IntervalStrategy):
"""Count from TSS to TES"""
name = "gene"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
gene_info = genome.df_genes
for tup in gene_info[["chr", "start", "stop", "strand"]].itertuples():
result[tup.chr].append((tup[0], tup.strand, [tup.start], [tup.stop]))
return result
class IntervalStrategyExon(_IntervalStrategy):
"""count all exons"""
name = "exon"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
for gene in genome.genes.values():
exons = gene.exons_merged
result[gene.chr].append(
(gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))
)
return result
class IntervalStrategyIntron(_IntervalStrategy):
"""count all introns"""
name = "intron"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
for gene in genome.genes.values():
exons = gene.introns_strict
result[gene.chr].append(
(gene.gene_stable_id, gene.strand, list(exons[0]), list(exons[1]))
)
return result
class IntervalStrategyExonSmart(_IntervalStrategy):
"""For protein coding genes: count only in exons of protein-coding transcripts.
For other genes: count all exons"""
name = "exonsmart"
def _get_interval_tuples_by_chr(self, genome):
result = {chr: [] for chr in genome.get_chromosome_lengths()}
for g in genome.genes.values():
e = g.exons_protein_coding_merged
if len(e[0]) == 0:
e = g.exons_merged
result[g.chr].append((g.gene_stable_id, g.strand, list(e[0]), list(e[1])))
return result
# Now the actual tag count annotators
class TagCountCommonQC:
def register_qc(self, genes):
if not qc_disabled():
self.register_qc_distribution(genes)
self.register_qc_pca(genes)
# self.register_qc_cummulative(genes)
def register_qc_distribution(self, genes):
output_filename = genes.result_dir / self.qc_folder / f"read_distribution.png"
output_filename.parent.mkdir(exist_ok=True)
def plot(output_filename, elements):
df = genes.df
df = dp(df).select({x.aligned_lane.name: x.columns[0] for x in elements}).pd
if len(df) == 0:
df = pd.DataFrame({"x": [0], "y": [0], "text": "no data"})
dp(df).p9().add_text("x", "y", "text").render(output_filename).pd
else:
plot_df = dp(df).melt(var_name="sample", value_name="count").pd
plot = dp(plot_df).p9().theme_bw()
print(df)
if ((df > 0).sum(axis=0) > 1).any() and len(df) > 1:
plot = plot.geom_violin(
dp.aes(x="sample", y="count"), width=0.5, bw=0.1
)
if len(plot_df["sample"].unique()) > 1:
plot = plot.annotation_stripes(fill_range=True)
if (plot_df["count"] > 0).any():
# can't have a log boxplot with all nans (log(0))
plot = plot.scale_y_continuous(
trans="log10",
name=self.qc_distribution_scale_y_name,
breaks=[1, 10, 100, 1000, 10000, 100_000, 1e6, 1e7],
)
print(plot_df)
return (
plot.add_boxplot(
x="sample", y="count", _width=0.1, _fill=None, _color="blue"
)
.turn_x_axis_labels()
.title("Raw read distribution")
.hide_x_axis_title()
.render_args(limitsize=False)
.render(output_filename, width=0.2 * len(elements) + 1, height=4)
)
return register_qc(
QCCollectingJob(output_filename, plot)
.depends_on(genes.add_annotator(self))
.add(self)
)
def register_qc_pca(self, genes):
output_filename = genes.result_dir / self.qc_folder / f"pca.png"
def plot(output_filename, elements):
import sklearn.decomposition as decom
if len(elements) == 1:
xy = np.array([[0], [0]]).transpose()
title = "PCA %s - fake / single sample" % genes.name
else:
pca = decom.PCA(n_components=2, whiten=False)
data = genes.df[[x.columns[0] for x in elements]]
data -= data.min() # min max scaling 0..1
data /= data.max()
                data = data[~pd.isnull(data).any(axis=1)]  # can't do PCA on NaN values
if len(data):
pca.fit(data.T)
xy = pca.transform(data.T)
title = "PCA %s\nExplained variance: x %.2f%%, y %.2f%%" % (
genes.name,
pca.explained_variance_ratio_[0] * 100,
pca.explained_variance_ratio_[1] * 100,
)
else:
xy = np.array(
[[0] * len(elements), [0] * len(elements)]
).transpose()
title = "PCA %s - fake / no rows" % genes.name
plot_df = pd.DataFrame(
{"x": xy[:, 0], "y": xy[:, 1], "label": [x.plot_name for x in elements]}
)
print(plot_df)
(
dp(plot_df)
.p9()
.theme_bw()
.add_scatter("x", "y")
.add_text(
"x",
"y",
"label",
# cool, this can go into an endless loop...
# _adjust_text={
# "expand_points": (2, 2),
# "arrowprops": {"arrowstyle": "->", "color": "red"},
# },
)
.scale_color_many_categories()
.title(title)
.render(output_filename, width=8, height=6)
)
return register_qc(
QCCollectingJob(output_filename, plot)
.depends_on(genes.add_annotator(self))
.add(self)
)
class _FastTagCounter(Annotator, TagCountCommonQC):
def __init__(
self, aligned_lane, count_strategy, interval_strategy, column_name, column_desc
):
if not hasattr(aligned_lane, "get_bam"):
raise ValueError("_FastTagCounter only accepts aligned lanes!")
self.aligned_lane = aligned_lane
self.genome = self.aligned_lane.genome
self.count_strategy = count_strategy
self.interval_strategy = interval_strategy
self.columns = [(column_name % (self.aligned_lane.name,)).strip()]
self.cache_name = (
"FT_%s_%s" % (count_strategy.name, interval_strategy.name)
+ "_"
+ hashlib.md5(self.columns[0].encode("utf-8")).hexdigest()
)
self.column_properties = {self.columns[0]: {"description": column_desc}}
self.vid = aligned_lane.vid
self.cores_needed = count_strategy.cores_needed
self.plot_name = self.aligned_lane.name
self.qc_folder = f"{self.count_strategy.name}_{self.interval_strategy.name}"
self.qc_distribution_scale_y_name = "raw counts"
def calc(self, df):
if ppg.inside_ppg():
data = self._data
else:
data = self.calc_data()
lookup = self.count_strategy.extract_lookup(data)
result = []
for gene_stable_id in df["gene_stable_id"]:
result.append(lookup.get(gene_stable_id, 0))
        result = np.array(result, dtype=np.float64)
return | pd.Series(result) | pandas.Series |
"""Script to sanitize labels and convert annotations to the classifier format.
"""
import argparse
import json
import logging
import nltk
import numpy
import os
import pandas
import utils
logging.basicConfig(level=logging.INFO)
DEFAULT_LABEL = 'O'
def read_arguments():
"""Parses the arguments from the stdin and returns an object."""
parser = argparse.ArgumentParser()
parser.add_argument('annotated_documents', type=str,
help='Path of directory with the annotated files')
parser.add_argument('--output_directory', type=str,
help='Path of directory to save the files')
parser.add_argument('--mapping_filename', type=str,
help='Filename to read the mappings.')
parser.add_argument('--update_mappings', action='store_true',
help='Add new labels found to mapping.')
return parser.parse_args()
def add_columns(annotation):
new_column_index = []
new_column_pos = []
sentence_count = 0
sentence = []
for row in annotation.iterrows():
if pandas.isnull(row[1]['labels']):
new_column_pos += [x[1] for x in nltk.pos_tag(sentence)]
            new_column_pos.append(numpy.nan)
sentence = []
            new_column_index.append(numpy.nan)
else:
new_column_index.append(len(sentence))
sentence.append(row[1]['tokens'])
annotation['sentence_index'] = new_column_index
annotation['pos_tag'] = new_column_pos
def separate_labels(annotation):
"""Adds columns with raw lkif and yago uri labels"""
def split_function(row):
if row == DEFAULT_LABEL:
return pandas.Series([DEFAULT_LABEL] * 2)
uri, lkif = row.split('##')
ner_tag = uri[0]
return pandas.Series([uri, ner_tag + '-' + lkif])
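    # Example of the expected label format (values are hypothetical): a label
    # such as "B-SomeUri##Contract" splits into uri_tag "B-SomeUri" and
    # lkif_tag "B-Contract"; the default "O" label is passed through unchanged.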
annotation[['uri_tag', 'lkif_tag']] = annotation['labels'].apply(
split_function)
def apply_mappings(annotation, mappings):
"""Replace labels according to mappings."""
def get_or_update_label(mapping_key, label_name, row):
original_label = row[label_name][2:]
if original_label not in mappings[mapping_key]:
# Add missing label
mappings[mapping_key][original_label] = None
if mappings[mapping_key][original_label] == "":
return DEFAULT_LABEL
if mappings[mapping_key][original_label] is not None:
return row[label_name][:2] + mappings[mapping_key][original_label]
return row[label_name][:2] + original_label
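    # Mapping semantics as implemented above (not documented elsewhere):
    #   label missing from the mapping -> added with value None for later review
    #   mapped to ""                   -> collapsed to the default "O" label
    #   mapped to None                 -> original label text kept
    #   mapped to another string       -> label replaced, keeping its two-character
    #                                     prefix (e.g. "B-")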
def apply_function(row):
if row['lkif_tag'] == DEFAULT_LABEL:
return | pandas.Series([DEFAULT_LABEL] * 4) | pandas.Series |
import pandas as pd
from pathlib import Path
import os
import numpy as np
import re
root = os.path.dirname(os.getcwd())
html_url_repo = pd.read_csv(root + '/Dataset/3out.csv', header=None)
# import maven comments which is stored as stdout file
files = Path(root + '/Dataset/').glob('**/stdout')
SATD_comments = pd.DataFrame(columns=['Link Location', 'Comment', 'Keywords'])
Comments_with_no_keywords = pd.DataFrame(columns=['Link Location', 'Comment'])
revision_list = pd.DataFrame(columns=['Repository ID', 'Revison'])
with open(root + '/Dataset/keywords_list.txt', 'r') as file:
keywords_debt = file.read().split(', ')
maven_comments = 0
maven_repo = 0
pom_file = 0
error = []
for file in files:
df = | pd.read_json(file) | pandas.read_json |
from trading import models, serializers, services
import pandas as pd
import requests
import json
def create_update_symbols():
symbols = services.get_symbols()
for element in symbols:
symbol_obj, created = models.Symbols.objects.update_or_create(
symbol=element.get('symbol', None),
defaults={
'symbol': element.get('symbol', None),
'name': element.get('name', None),
'isEnabled': element.get('isEnabled', None),
'iexId': element.get('iexId', None),
'date': element.get('date', None),
'symbol_type': element.get('type', None),
}
)
def create_update_history(symbol, range_time):
history = services.get_history_data(symbol,range_time)
for element in history:
history_obj, created = models.HistoricalPrices.objects.filter(symbol=symbol).update_or_create(
date=element.get('date', None),
defaults={
'symbol_id': symbol,
'date': element.get('date', None),
'open_price': element.get('open', None),
'high_price': element.get('high', None),
'low_price': element.get('low', None),
'close_price': element.get('close', None),
'volume': element.get('volume', None),
'uOpen': element.get('uOpen', None),
'uHigh': element.get('uHigh', None),
'uLow': element.get('uLow', None),
'uClose': element.get('uClose', None),
'uVolume': element.get('uVolume', None),
'change': element.get('change', None),
'changePercent': element.get('changePercent', None),
'changeOverTime': element.get('changeOverTime', None),
}
)
def get_recommendations():
df_recomendation = | pd.DataFrame() | pandas.DataFrame |
"""
filename chapter_id speaker_id dataset_id
0 1272/128104/1272-128104-0000.wav 128104 1272 dev-clean
1 1272/128104/1272-128104-0001.wav 128104 1272 dev-clean
2 1272/128104/1272-128104-0002.wav 128104 1272 dev-clean
3 1272/128104/1272-128104-0003.wav 128104 1272 dev-clean
4 1272/128104/1272-128104-0004.wav 128104 1272 dev-clean
5 1272/128104/1272-128104-0005.wav 128104 1272 dev-clean
6 1272/128104/1272-128104-0006.wav 128104 1272 dev-clean
7 1272/128104/1272-128104-0007.wav 128104 1272 dev-clean
8 1272/128104/1272-128104-0008.wav 128104 1272 dev-clean
9 1272/128104/1272-128104-0009.wav 128104 1272 dev-clean
"""
import logging
import numpy as np
import pandas as pd
from python_speech_features import fbank, delta
import ds_constants as c
from ds_constants import SAMPLE_RATE
from voxceleb_wav_reader import read_audio
#def normalize_frames(m):
# return [(v - np.mean(v)) / np.std(v) for v in m]
def normalize_frames(m,epsilon=1e-12):
return [(v - np.mean(v)) / max(np.std(v),epsilon) for v in m]
def pre_process_inputs(signal=np.random.uniform(size=32000), target_sample_rate=8000):
filter_banks, energies = fbank(signal, samplerate=target_sample_rate, nfilt=64, winlen=0.025)
delta_1 = delta(filter_banks, N=1)
delta_2 = delta(delta_1, N=1)
filter_banks = normalize_frames(filter_banks)
delta_1 = normalize_frames(delta_1)
delta_2 = normalize_frames(delta_2)
frames_features = np.hstack([filter_banks, delta_1, delta_2])
num_frames = len(frames_features)
network_inputs = []
for j in range(8, num_frames - 8):
frames_slice = frames_features[j - 8:j + 8]
network_inputs.append(np.reshape(frames_slice, (32, 32, 3)))
return np.array(network_inputs)
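# Shape note (derived from the code above): each frame contributes 64 filterbank
# values plus first- and second-order deltas, i.e. 192 features per frame. A
# sliding window of 16 frames therefore holds 16 * 192 = 3072 values, reshaped
# into a 32 x 32 x 3 "image"; one window is emitted for every j in
# [8, num_frames - 8).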
class MiniBatch:
def __init__(self, voxceleb, batch_size):
# indices = np.random.choice(len(libri), size=batch_size, replace=False)
# [anc1, anc2, anc3, pos1, pos2, pos3, neg1, neg2, neg3]
# [sp1, sp2, sp3, sp1, sp2, sp3, sp4, sp5, sp6]
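        # For each triplet below, the anchor and positive utterances are drawn
        # from the same randomly chosen speaker and the negative from a second,
        # different speaker.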
unique_speakers = list(voxceleb['speaker_id'].unique())
num_triplets = batch_size
anchor_batch = None
positive_batch = None
negative_batch = None
for ii in range(num_triplets):
two_different_speakers = np.random.choice(unique_speakers, size=2, replace=False)
anchor_positive_speaker = two_different_speakers[0]
negative_speaker = two_different_speakers[1]
anchor_positive_file = voxceleb[voxceleb['speaker_id'] == anchor_positive_speaker].sample(n=2, replace=False)
anchor_df = | pd.DataFrame(anchor_positive_file[0:1]) | pandas.DataFrame |
#!/usr/bin/python3
import sys
import pandas as pd
import numpy as np
import os
import concurrent.futures
import functools, itertools
import sofa_time
import statistics
import multiprocessing as mp
import socket
import ipaddress
# sys.path.insert(0, '/home/st9540808/Desktop/sofa/bin')
import sofa_models, sofa_preprocess
import sofa_config
import sofa_print
colors_send = ['#14f2e0', '#41c8e5', '#6e9eeb']
colors_recv = ['#9a75f0', '#c74bf6', '#f320fa', '#fe2bcc']
color_send = itertools.cycle(colors_send)
color_recv = itertools.cycle(colors_recv)
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit", # 13
"msg_id"] # 14
# @profile
def extract_individual_rosmsg(df_send_, df_recv_, *df_others_):
""" Return a dictionary with topic name as key and
        a dict of ros messages as value.
Structure of return value: {topic_name: {(guid, seqnum): log}}
where (guid, seqnum) is a msg_id
"""
# Convert timestamp to unix time
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, *df_others):
# df['ts'] = df['ts'] + unix_time_off
df_send_[1]['ts'] = df_send_[1]['ts'] + df_send_[0].cpu_time_offset + df_send_[0].unix_time_off
df_recv_[1]['ts'] = df_recv_[1]['ts'] + df_recv_[0].cpu_time_offset + df_recv_[0].unix_time_off
df_others = []
for cfg_to_pass, df_other in df_others_:
df_other['ts'] = df_other['ts'] + cfg_to_pass.cpu_time_offset + cfg_to_pass.unix_time_off
df_others.append(df_other)
df_send = df_send_[1]
df_recv = df_recv_[1]
# sort by timestamp
df_send.sort_values(by=['ts'], ignore_index=True)
df_recv.sort_values(by=['ts'], ignore_index=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# other logs (assume there's no happen-before relations that needed to be resolved)
# every dataframe is a dictionary in `other_log_list`
gb_others = [df_other.groupby('guid') for df_other in df_others]
other_log_list = [{guid:log for guid, log in gb_other} for gb_other in gb_others]
# find guids that are in both subsciption and publisher log
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid]
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
print(pubaddr)
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
# print(add_data_calls)
all_RTPSMsg_idx = ((df_send['func'] == '~RTPSMessageGroup') & (df_send['publisher'] == pubaddr))
all_RTPSMsgret_idx = ((df_send['func'] == '~RTPSMessageGroup exit') & (df_send['publisher'] == pubaddr))
all_sendSync_idx = ((df_send['func'] == 'sendSync') & (df_send['publisher'] == pubaddr))
all_nn_xpack_idx = (df['func'] == 'nn_xpack_send1')
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# For grouping RTPSMessageGroup function
try:
ts_gt = (df_send['ts'] > ts) # ts greater than that of add_data_call
RTPSMsg_idx = df_send.loc[ts_gt & all_RTPSMsg_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsg_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
RTPSMsgret_idx = df_send.loc[ts_gt & all_RTPSMsgret_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsgret_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
sendSync_idx = df_send.loc[ts_gt & (df_send['ts'] < df_send.loc[RTPSMsgret_idx, 'ts']) & all_sendSync_idx]
sendSync = sendSync_idx.copy()
sendSync['seqnum'] = add_data_call.loc['seqnum']
modified_rows.extend(row for _, row in sendSync.iterrows())
except ValueError as e:
pass
if 'rmw_cyclonedds_cpp' in df['implementation'].values:
try:
df_cls = other_log_list[0][guid]
seqnum = add_data_call.loc['seqnum']
max_ts = df_cls[(df_cls['layer'] == 'cls_egress') & (df_cls['seqnum'] == seqnum)]['ts'].max()
index = df.loc[(ts < df['ts']) & (df['ts'] < max_ts) & all_nn_xpack_idx].index
df_send_partial.loc[index, 'seqnum'] = seqnum
except ValueError as e:
pass
df_send_partial = pd.concat([df_send_partial, pd.DataFrame(modified_rows)])
# get a subscrption from log
df = all_subscriptions_log[guid]
df_recv_partial = all_subscriptions_log[guid].copy()
        add_recvchange_calls = df[~pd.isna(df['seqnum'])]  # get all non-NaN seqnums in log
if 'cyclonedds' in df['layer'].unique():
add_recvchange_calls = df[df['func'] == 'ddsi_udp_conn_read exit']
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = | pd.unique(df_recv['pid']) | pandas.unique |
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pandas as pd
import numpy as np
import os
import sys
import json
from influxdb import InfluxDBClient
from influxdb import DataFrameClient
sys.path.insert(0, '../')
from utils import create_dataframe
# Connecting to influxDb
influxdb1_client = InfluxDBClient("localhost", "8086", "admin","admin","Predictions_testset1")
influxdb1_client.create_database("Predictions_testset1")
influxdb2_client = InfluxDBClient("localhost", "8086", "admin","admin","Predictions_testset2")
influxdb2_client.create_database("Predictions_testset2")
dbname = input("Enter the database name: ")
if(dbname == '1st_test' or dbname == '2nd_test'):
if (dbname=="1st_test"):
no_of_bearings = 8
influxdb_client = DataFrameClient("localhost", "8086", "admin","admin",dbname)
elif(dbname == "2nd_test"):
no_of_bearings = 4
influxdb_client = DataFrameClient("localhost", "8086", "admin","admin",dbname)
frequency_component1, frequency_component2, frequency_component3, frequency_component4, frequency_component5 = ( | pd.DataFrame() | pandas.DataFrame |
def sim_reg_data(xmin, xmax, ymin, ymax, n, sd):
import pandas as pd
import numpy.random as nr
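    # Simulate n points along the straight line from (xmin, ymin) to
    # (xmax, ymax), adding Gaussian noise with standard deviation sd to y.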
w = nr.normal(loc = 0, scale = sd, size = n)
xstep = float(xmax - xmin)/float(n - 1)
ystep = float(ymax - ymin)/float(n - 1)
x = []
xcur = xmin
y = []
ycur = ymin
for i in range(n):
x.append(xcur)
xcur += xstep
y.append(ycur + w[i])
ycur += ystep
out = | pd.DataFrame([x, y]) | pandas.DataFrame |
import argparse
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os
import seaborn as sns
import matplotlib.dates as mdates
import sys
sys.path.append('../')
from processing_helpers import *
from load_paths import load_box_paths
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Process simulation outputs to send to Civis"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-stem", "--stem",
type=str,
help="Name of experiment and folder name",
default=None,
)
parser.add_argument(
"-p", "--processStep",
type=str,
help="Only required if files are too large to process regions in a loop",
default='generate_outputs',
)
parser.add_argument(
"-l", "--Location",
type=str,
help="Local or NUCLUSTER",
default='Local',
)
return parser.parse_args()
def get_scenarioName(exp_suffix) :
scenarioName = exp_suffix
if exp_suffix == "reopen": scenarioName = "reopen_gradual"
if exp_suffix == "gradual": scenarioName = "reopen_gradual"
if exp_suffix == "interventionStop": scenarioName = "endsip"
if exp_suffix == "0": scenarioName = "baseline"
if exp_suffix == "neverSIP": scenarioName = "neversip"
if exp_suffix == "stopSIP30": scenarioName = "july1partial30"
if exp_suffix == "stopSIP10": scenarioName = "july1partial10"
return(scenarioName)
def plot_sim(dat,suffix,channels) :
if suffix not in ["All","central","southern","northeast","northcentral"]:
suffix_nr = str(suffix.split("-")[1])
if suffix == "All":
suffix_nr ="illinois"
capacity = load_capacity(suffix_nr)
fig = plt.figure(figsize=(18, 12))
fig.subplots_adjust(right=0.97, wspace=0.2, left=0.07, hspace=0.15)
palette = sns.color_palette('Set1', len(channels))
for c, channel in enumerate(channels):
ax = fig.add_subplot(3, 3, c + 1)
ax.plot(dat['date'], dat['%s_median' % channel], color=palette[c])
ax.fill_between(dat['date'].values, dat['%s_95CI_lower' % channel], dat['%s_95CI_upper' % channel],
color=palette[c], linewidth=0, alpha=0.2)
ax.fill_between(dat['date'].values, dat[ '%s_50CI_lower' % channel], dat[ '%s_50CI_upper' % channel],
color=palette[c], linewidth=0, alpha=0.4)
if channel in capacity.keys():
ax.plot([np.min(dat['date']), np.max(dat['date'])],
[capacity[channel], capacity[channel]], '--', linewidth=2, color=palette[c])
ax.set_title(channel, y=0.85)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\n%y'))
plotname = f'{scenarioName}_{suffix}'
plotname = plotname.replace('EMS-','covidregion_')
plt.savefig(os.path.join(plot_path, plotname + '.png'))
plt.savefig(os.path.join(plot_path, 'pdf', plotname + '.pdf'), format='PDF')
# plt.show()
def load_and_plot_data(ems_region, savePlot=True) :
region_suffix = f'_{str(ems_region)}'
column_list = ['startdate', 'time', 'scen_num', 'sample_num', 'run_num']
outcome_channels = ['susceptible', 'infected', 'recovered', 'infected_cumul','asymp_cumul','asymp_det_cumul', 'symp_mild_cumul', 'symp_severe_cumul', 'symp_mild_det_cumul',
'symp_severe_det_cumul', 'hosp_det_cumul', 'hosp_cumul', 'detected_cumul', 'crit_cumul', 'crit_det_cumul', 'deaths_det_cumul',
'deaths', 'crit_det', 'critical', 'hosp_det', 'hospitalized']
for channel in outcome_channels:
column_list.append(channel + region_suffix)
df = load_sim_data(exp_name,region_suffix = region_suffix, column_list=column_list)
df['ems'] = ems_region
df['ventilators'] = get_vents(df['crit_det'].values)
df['new_symptomatic'] = df['new_symptomatic_severe'] + df['new_symptomatic_mild'] + df['new_detected_symptomatic_severe'] + df['new_detected_symptomatic_mild']
channels = ['infected', 'new_infected', 'new_symptomatic', 'new_deaths', 'new_detected_deaths', 'hospitalized', 'critical', 'hosp_det', 'crit_det', 'ventilators', 'recovered']
plotchannels = ['infected', 'new_infected', 'new_symptomatic', 'new_deaths', 'new_detected_deaths', 'hosp_det', 'crit_det', 'ventilators', 'recovered']
adf = pd.DataFrame()
for c, channel in enumerate(channels):
mdf = df.groupby(['date', 'ems'])[channel].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()
mdf = mdf.rename(columns={'CI_50': '%s_median' % channel,
'CI_2pt5': '%s_95CI_lower' % channel,
'CI_97pt5': '%s_95CI_upper' % channel,
'CI_25': '%s_50CI_lower' % channel,
'CI_75': '%s_50CI_upper' % channel})
if adf.empty:
adf = mdf
else:
adf = | pd.merge(left=adf, right=mdf, on=['date', 'ems']) | pandas.merge |
import numpy as np
from typing import Dict, Tuple, Optional, List
import pandas as pd
from torch import Tensor
from typing import Union
def create_neurons(neuron_list):
prinz_neurons = {
"LP": {
"LP_0": [100, 0, 8, 40, 5, 75, 0.05, 0.02], # this3 g_CaS g_A g_Kd
"LP_1": [100, 0, 6, 30, 5, 50, 0.05, 0.02], # this2 # KCa, H # this3
"LP_2": [100, 0, 10, 50, 5, 100, 0.0, 0.03],
"LP_3": [100, 0, 4, 20, 0, 25, 0.05, 0.03],
"LP_4": [100, 0, 6, 30, 0, 50, 0.03, 0.02], # this2
},
"PY": {
"PY_1": [200, 7.5, 0, 50, 0, 75, 0.05, 0.0],
# this3 # this3 g_Na, g_CaT, g_CaS
"PY_0": [100, 2.5, 2, 50, 0, 125, 0.05, 0.01], # this3
"PY_3": [400, 2.5, 2, 50, 0, 75, 0.05, 0.0],
# this3 # this3 g_leak, g_Kd, g_Na
"PY_5": [500, 2.5, 2, 40, 0, 125, 0.0, 0.02], # this2 # g_H, g_leak
"PY_2": [200, 10.0, 0, 50, 0, 100, 0.03, 0.0], # this3 # CaT Kd H
"PY_4": [500, 2.5, 2, 40, 0, 125, 0.01, 0.03], # this2
},
"PM": {
"PM_0": [400, 2.5, 6, 50, 10, 100, 0.01, 0.0], # this2 g_Na, KCa
"PM_3": [200, 5.0, 4, 40, 5, 125, 0.01, 0.0], # this3 CaT, g_A, g_Kd
"PM_4": [300, 2.5, 2, 10, 5, 125, 0.01, 0.0],
"PM_1": [100, 2.5, 6, 50, 5, 100, 0.01, 0.0], # this2
"PM_2": [200, 2.5, 4, 50, 5, 50, 0.01, 0.0], # this3
},
}
# Note (PM_0 or PM_1) / (LP_2) / (PY_0) is figure 5a in Prinz 2004.
# Note (PM_4) / (LP_3) / (PY_4) is figure 5b in Prinz 2004.
ret = []
for n in neuron_list:
membrane_area = np.asarray(n[2], dtype=np.float64)
pn = np.asarray(prinz_neurons[n[0]][n[1]], dtype=np.float64)
neuron = pn * membrane_area
ret.append(neuron)
return np.asarray(ret)
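# Hypothetical usage sketch (the membrane areas below are illustrative only,
# not values taken from the model definition):
#   neurons = create_neurons([["PM", "PM_0", 0.628e-3],
#                             ["LP", "LP_2", 0.628e-3],
#                             ["PY", "PY_0", 0.628e-3]])
#   # -> (3, 8) array: each row holds one neuron's eight maximal conductances
#   #    scaled by its membrane area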
def membrane_conductances_replaced_with_defaults(circuit_parameters, defaults_dict):
default_neurons = create_neurons(defaults_dict["membrane_gbar"])
default_neurons = np.reshape(default_neurons, (1, 24))
type_names, cond_names = select_names()
type_names = type_names[:24]
cond_names = cond_names[:24]
default_neurons_pd = pd.DataFrame(default_neurons, columns=[type_names, cond_names])
for tn, cn in zip(type_names, cond_names):
if (tn, cn) in circuit_parameters:
default_neurons_pd.loc[0][tn, cn] = circuit_parameters[tn, cn] * 0.628e-3
return default_neurons_pd
def synapses_replaced_with_defaults(circuit_parameters, defaults_dict):
type_names, cond_names = select_names()
type_names = type_names[24:]
cond_names = cond_names[24:]
data_array = []
for tn, cn in zip(type_names, cond_names):
if (tn, cn) in circuit_parameters:
data_array.append(circuit_parameters[tn, cn])
data_array = np.asarray([data_array])
default_synapse_values = | pd.DataFrame(data_array, columns=[type_names, cond_names]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
        # 0.7.3 -> 0.8.0 pickle format migration
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
        # the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
        # FIXME: data types change to float because
        # of intermediate NaN insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
        exp_vals = [('A', pd.Timestamp('2011-01-02')),
                    ('A', pd.Timestamp('2011-01-01')),
                    (np.nan, pd.NaT)]
        expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
        tm.assert_index_equal(result, expected)
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray
class TestSparseArrayConcat:
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_basic(self, kind):
a = SparseArray([1, 0, 0, 2], kind=kind)
b = SparseArray([1, 0, 2, 2], kind=kind)
result = SparseArray._concat_same_type([a, b])
        # Can't make any assertions about the sparse index itself
        # since we don't merge sparse blocks across arrays in to_concat
expected = np.array([1, 2, 1, 2, 2], dtype="int64")
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize("kind", ["integer", "block"])
def test_uses_first_kind(self, kind):
other = "integer" if kind == "block" else "block"
a = SparseArray([1, 0, 0, 2], kind=kind)
b = SparseArray([1, 0, 2, 2], kind=other)
result = SparseArray._concat_same_type([a, b])
expected = np.array([1, 2, 1, 2, 2], dtype="int64")
tm.assert_numpy_array_equal(result.sp_values, expected)
assert result.kind == kind
@pytest.mark.parametrize(
"other, expected_dtype",
[
# compatible dtype -> preserve sparse
(pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)),
# (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)),
# incompatible dtype -> Sparse[common dtype]
            (pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)),
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import matplotlib.pyplot as plt
from matplotlib.colors import Colormap
import numpy as np
import pandas as pd
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.utils import check_array, check_consistent_length
from sklearn.preprocessing import Binarizer
from ..embed import selectSVD
from ..utils import import_graph, pass_to_ranks
def _check_common_inputs(
figsize=None,
height=None,
title=None,
context=None,
font_scale=None,
legend_name=None,
title_pad=None,
hier_label_fontsize=None,
):
# Handle figsize
if figsize is not None:
if not isinstance(figsize, tuple):
msg = "figsize must be a tuple, not {}.".format(type(figsize))
raise TypeError(msg)
# Handle heights
if height is not None:
if not isinstance(height, (int, float)):
msg = "height must be an integer or float, not {}.".format(type(height))
raise TypeError(msg)
# Handle title
if title is not None:
if not isinstance(title, str):
msg = "title must be a string, not {}.".format(type(title))
raise TypeError(msg)
# Handle context
if context is not None:
if not isinstance(context, str):
msg = "context must be a string, not {}.".format(type(context))
raise TypeError(msg)
elif context not in ["paper", "notebook", "talk", "poster"]:
msg = "context must be one of (paper, notebook, talk, poster), \
not {}.".format(
context
)
raise ValueError(msg)
# Handle font_scale
if font_scale is not None:
if not isinstance(font_scale, (int, float)):
msg = "font_scale must be an integer or float, not {}.".format(
type(font_scale)
)
raise TypeError(msg)
# Handle legend name
if legend_name is not None:
if not isinstance(legend_name, str):
msg = "legend_name must be a string, not {}.".format(type(legend_name))
raise TypeError(msg)
if hier_label_fontsize is not None:
if not isinstance(hier_label_fontsize, (int, float)):
msg = "hier_label_fontsize must be a scalar, not {}.".format(
                type(hier_label_fontsize)
)
raise TypeError(msg)
if title_pad is not None:
if not isinstance(title_pad, (int, float)):
msg = "title_pad must be a scalar, not {}.".format(type(legend_name))
raise TypeError(msg)
def _transform(arr, method):
if method is not None:
if method in ["log", "log10"]:
# arr = np.log(arr, where=(arr > 0))
# hacky, but np.log(arr, where=arr>0) is really buggy
arr = arr.copy()
if method == "log":
arr[arr > 0] = np.log(arr[arr > 0])
else:
arr[arr > 0] = np.log10(arr[arr > 0])
elif method in ["zero-boost", "simple-all", "simple-nonzero"]:
arr = pass_to_ranks(arr, method=method)
elif method == "binarize":
transformer = Binarizer().fit(arr)
arr = transformer.transform(arr)
else:
msg = "Transform must be one of {log, log10, binarize, zero-boost, simple-all, \
simple-nonzero, not {}.".format(
method
)
raise ValueError(msg)
return arr
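# A minimal, illustrative sketch of how ``_transform`` behaves; the array below
# is made up purely for demonstration. Zeros are left untouched and only the
# positive entries are log-transformed:
# >>> _transform(np.array([[0.0, 1.0], [10.0, 0.0]]), "log10")
# array([[0., 0.],
#        [1., 0.]])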
def _process_graphs(
graphs, inner_hier_labels, outer_hier_labels, transform, sort_nodes
):
"""Handles transformation and sorting of graphs for plotting"""
for g in graphs:
check_consistent_length(g, inner_hier_labels, outer_hier_labels)
graphs = [_transform(arr, transform) for arr in graphs]
if inner_hier_labels is not None:
inner_hier_labels = np.array(inner_hier_labels)
if outer_hier_labels is None:
outer_hier_labels = np.ones_like(inner_hier_labels)
else:
outer_hier_labels = np.array(outer_hier_labels)
else:
inner_hier_labels = np.ones(graphs[0].shape[0], dtype=int)
outer_hier_labels = np.ones_like(inner_hier_labels)
graphs = [
_sort_graph(arr, inner_hier_labels, outer_hier_labels, sort_nodes)
for arr in graphs
]
return graphs
def heatmap(
X,
transform=None,
figsize=(10, 10),
title=None,
context="talk",
font_scale=1,
xticklabels=False,
yticklabels=False,
cmap="RdBu_r",
vmin=None,
vmax=None,
center=0,
cbar=True,
inner_hier_labels=None,
outer_hier_labels=None,
hier_label_fontsize=30,
ax=None,
title_pad=None,
sort_nodes=False,
**kwargs
):
r"""
Plots a graph as a color-encoded matrix.
Nodes can be grouped by providing `inner_hier_labels` or both
`inner_hier_labels` and `outer_hier_labels`. Nodes can also
be sorted by the degree from largest to smallest degree nodes.
The nodes will be sorted within each group if labels are also
provided.
Read more in the :ref:`tutorials <plot_tutorials>`
Parameters
----------
X : nx.Graph or np.ndarray object
Graph or numpy matrix to plot
transform : None, or string {'log', 'log10', 'zero-boost', 'simple-all', 'simple-nonzero'}
- 'log'
Plots the natural log of all nonzero numbers
- 'log10'
Plots the base 10 log of all nonzero numbers
- 'zero-boost'
            Pass to ranks method. Preserves the edge weight for all 0s, but ranks
            the other edges as if the ranks of all 0 edges had been assigned.
- 'simple-all'
Pass to ranks method. Assigns ranks to all non-zero edges, settling
ties using the average. Ranks are then scaled by
:math:`\frac{rank(\text{non-zero edges})}{n^2 + 1}`
where n is the number of nodes
- 'simple-nonzero'
Pass to ranks method. Same as simple-all, but ranks are scaled by
:math:`\frac{rank(\text{non-zero edges})}{\text{# non-zero edges} + 1}`
- 'binarize'
Binarize input graph such that any edge weight greater than 0 becomes 1.
figsize : tuple of integers, optional, default: (10, 10)
Width, height in inches.
title : str, optional, default: None
Title of plot.
context : None, or one of {paper, notebook, talk (default), poster}
The name of a preconfigured set.
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
xticklabels, yticklabels : bool or list, optional
If list-like, plot these alternate labels as the ticklabels.
cmap : str, list of colors, or matplotlib.colors.Colormap, default: 'RdBu_r'
Valid matplotlib color map.
vmin, vmax : floats, optional (default=None)
Values to anchor the colormap, otherwise they are inferred from the data and
other keyword arguments.
center : float, default: 0
The value at which to center the colormap
cbar : bool, default: True
Whether to draw a colorbar.
inner_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes. If not None, will group the nodes
according to these labels and plot the labels on the marginal
outer_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes, ignored without ``inner_hier_labels``
If not None, will plot these labels as the second level of a hierarchy on the
marginals
hier_label_fontsize : int
Size (in points) of the text labels for the ``inner_hier_labels`` and
``outer_hier_labels``.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise will generate its own axes
title_pad : int, float or None, optional (default=None)
Custom padding to use for the distance of the title from the heatmap. Autoscales
if ``None``
sort_nodes : boolean, optional (default=False)
Whether or not to sort the nodes of the graph by the sum of edge weights
(degree for an unweighted graph). If ``inner_hier_labels`` is passed and
``sort_nodes`` is ``True``, will sort nodes this way within block.
**kwargs : dict, optional
additional plotting arguments passed to Seaborn's ``heatmap``
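    Examples
    --------
    A minimal usage sketch; the adjacency matrix and block labels below are
    made up purely for illustration, and the import assumes this function is
    exposed as ``graspologic.plot.heatmap``.
    >>> import numpy as np
    >>> from graspologic.plot import heatmap
    >>> adj = np.random.poisson(0.5, size=(20, 20)).astype(float)
    >>> blocks = np.repeat(["a", "b"], 10)
    >>> ax = heatmap(adj, inner_hier_labels=blocks, title="Toy graph")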
"""
_check_common_inputs(
figsize=figsize,
title=title,
context=context,
font_scale=font_scale,
hier_label_fontsize=hier_label_fontsize,
title_pad=title_pad,
)
# Handle ticklabels
if isinstance(xticklabels, list):
if len(xticklabels) != X.shape[1]:
msg = "xticklabels must have same length {}.".format(X.shape[1])
raise ValueError(msg)
elif not isinstance(xticklabels, bool):
msg = "xticklabels must be a bool or a list, not {}".format(type(xticklabels))
raise TypeError(msg)
if isinstance(yticklabels, list):
if len(yticklabels) != X.shape[0]:
msg = "yticklabels must have same length {}.".format(X.shape[0])
raise ValueError(msg)
elif not isinstance(yticklabels, bool):
msg = "yticklabels must be a bool or a list, not {}".format(type(yticklabels))
raise TypeError(msg)
# Handle cmap
if not isinstance(cmap, (str, list, Colormap)):
msg = "cmap must be a string, list of colors, or matplotlib.colors.Colormap,"
msg += " not {}.".format(type(cmap))
raise TypeError(msg)
# Handle center
if center is not None:
if not isinstance(center, (int, float)):
msg = "center must be a integer or float, not {}.".format(type(center))
raise TypeError(msg)
# Handle cbar
if not isinstance(cbar, bool):
msg = "cbar must be a bool, not {}.".format(type(center))
raise TypeError(msg)
arr = import_graph(X)
arr = _process_graphs(
[arr], inner_hier_labels, outer_hier_labels, transform, sort_nodes
)[0]
# Global plotting settings
CBAR_KWS = dict(shrink=0.7) # norm=colors.Normalize(vmin=0, vmax=1))
with sns.plotting_context(context, font_scale=font_scale):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
plot = sns.heatmap(
arr,
cmap=cmap,
square=True,
xticklabels=xticklabels,
yticklabels=yticklabels,
cbar_kws=CBAR_KWS,
center=center,
cbar=cbar,
ax=ax,
vmin=vmin,
vmax=vmax,
**kwargs
)
if title is not None:
if title_pad is None:
if inner_hier_labels is not None:
title_pad = 1.5 * font_scale + 1 * hier_label_fontsize + 30
else:
title_pad = 1.5 * font_scale + 15
plot.set_title(title, pad=title_pad)
if inner_hier_labels is not None:
if outer_hier_labels is not None:
plot.set_yticklabels([])
plot.set_xticklabels([])
_plot_groups(
plot,
arr,
inner_hier_labels,
outer_hier_labels,
fontsize=hier_label_fontsize,
)
else:
_plot_groups(plot, arr, inner_hier_labels, fontsize=hier_label_fontsize)
return plot
def gridplot(
X,
labels=None,
transform=None,
height=10,
title=None,
context="talk",
font_scale=1,
alpha=0.7,
sizes=(10, 200),
palette="Set1",
legend_name="Type",
inner_hier_labels=None,
outer_hier_labels=None,
hier_label_fontsize=30,
title_pad=None,
sort_nodes=False,
):
r"""
Plots multiple graphs on top of each other with dots as edges.
This function is useful for visualizing multiple graphs simultaneously.
The size of the dots correspond to the edge weights of the graphs, and
colors represent input graphs.
Read more in the :ref:`tutorials <plot_tutorials>`
Parameters
----------
X : list of nx.Graph or np.ndarray object
List of nx.Graph or numpy arrays to plot
labels : list of str
List of strings, which are labels for each element in X.
``len(X) == len(labels)``.
transform : None, or string {'log', 'log10', 'zero-boost', 'simple-all', 'simple-nonzero'}
- 'log'
Plots the natural log of all nonzero numbers
- 'log10'
Plots the base 10 log of all nonzero numbers
- 'zero-boost'
            Pass to ranks method. Preserves the edge weight for all 0s, but ranks
            the other edges as if the ranks of all 0 edges had been assigned.
- 'simple-all'
Pass to ranks method. Assigns ranks to all non-zero edges, settling
ties using the average. Ranks are then scaled by
:math:`\frac{rank(\text{non-zero edges})}{n^2 + 1}`
where n is the number of nodes
- 'simple-nonzero'
Pass to ranks method. Same as simple-all, but ranks are scaled by
:math:`\frac{rank(\text{non-zero edges})}{\text{# non-zero edges} + 1}`
- 'binarize'
Binarize input graph such that any edge weight greater than 0 becomes 1.
height : int, optional, default: 10
Height of figure in inches.
title : str, optional, default: None
Title of plot.
context : None, or one of {paper, notebook, talk (default), poster}
The name of a preconfigured set.
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
palette : str, dict, optional, default: 'Set1'
Set of colors for mapping the ``hue`` variable. If a dict, keys should
be values in the hue variable
    alpha : float [0, 1], default: 0.7
Alpha value of plotted gridplot points
sizes : length 2 tuple, default: (10, 200)
Min and max size to plot edge weights
legend_name : string, default: 'Type'
Name to plot above the legend
inner_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes. If not None, will group the nodes
according to these labels and plot the labels on the marginal
outer_hier_labels : array-like, length of X's first dimension, default: None
Categorical labeling of the nodes, ignored without ``inner_hier_labels``
If not None, will plot these labels as the second level of a hierarchy on the
marginals
hier_label_fontsize : int
Size (in points) of the text labels for the ``inner_hier_labels`` and
``outer_hier_labels``.
title_pad : int, float or None, optional (default=None)
Custom padding to use for the distance of the title from the heatmap. Autoscales
if ``None``
sort_nodes : boolean, optional (default=False)
Whether or not to sort the nodes of the graph by the sum of edge weights
(degree for an unweighted graph). If ``inner_hier_labels`` is passed and
``sort_nodes`` is ``True``, will sort nodes this way within block.
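    Examples
    --------
    A minimal usage sketch; the two random graphs and their labels are made up
    purely for illustration, and the import assumes this function is exposed
    as ``graspologic.plot.gridplot``.
    >>> import numpy as np
    >>> from graspologic.plot import gridplot
    >>> graphs = [np.random.binomial(1, 0.3, size=(15, 15)) for _ in range(2)]
    >>> plot = gridplot(graphs, labels=["G1", "G2"], title="Two random graphs")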
"""
_check_common_inputs(
height=height,
title=title,
context=context,
font_scale=font_scale,
hier_label_fontsize=hier_label_fontsize,
title_pad=title_pad,
)
if isinstance(X, list):
graphs = [import_graph(x) for x in X]
else:
msg = "X must be a list, not {}.".format(type(X))
raise TypeError(msg)
if labels is None:
labels = np.arange(len(X))
check_consistent_length(X, labels)
graphs = _process_graphs(
X, inner_hier_labels, outer_hier_labels, transform, sort_nodes
)
if isinstance(palette, str):
palette = sns.color_palette(palette, desat=0.75, n_colors=len(labels))
dfs = []
for idx, graph in enumerate(graphs):
rdx, cdx = np.where(graph > 0)
weights = graph[(rdx, cdx)]
df = pd.DataFrame(
np.vstack([rdx + 0.5, cdx + 0.5, weights]).T,
columns=["rdx", "cdx", "Weights"],
)
df[legend_name] = [labels[idx]] * len(cdx)
dfs.append(df)
df = pd.concat(dfs, axis=0)
with sns.plotting_context(context, font_scale=font_scale):
sns.set_style("white")
plot = sns.relplot(
data=df,
x="cdx",
y="rdx",
hue=legend_name,
size="Weights",
sizes=sizes,
alpha=alpha,
palette=palette,
height=height,
facet_kws={
"sharex": True,
"sharey": True,
"xlim": (0, graph.shape[0] + 1),
"ylim": (0, graph.shape[0] + 1),
},
)
plot.ax.axis("off")
plot.ax.invert_yaxis()
if title is not None:
if title_pad is None:
if inner_hier_labels is not None:
title_pad = 1.5 * font_scale + 1 * hier_label_fontsize + 30
else:
title_pad = 1.5 * font_scale + 15
plt.title(title, pad=title_pad)
if inner_hier_labels is not None:
if outer_hier_labels is not None:
_plot_groups(
plot.ax,
graphs[0],
inner_hier_labels,
outer_hier_labels,
fontsize=hier_label_fontsize,
)
else:
_plot_groups(
plot.ax, graphs[0], inner_hier_labels, fontsize=hier_label_fontsize
)
return plot
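# A minimal usage sketch for gridplot (not part of the original module; the
# random graphs and labels below are made-up examples, and `np` is assumed to
# be the numpy import at the top of this module):
#
#     graphs = [np.random.binomial(1, 0.1, (20, 20)) for _ in range(3)]
#     plot = gridplot(graphs, labels=["g1", "g2", "g3"], transform="binarize",
#                     title="Three sampled graphs")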
def pairplot(
X,
labels=None,
col_names=None,
title=None,
legend_name=None,
variables=None,
height=2.5,
context="talk",
font_scale=1,
palette="Set1",
alpha=0.7,
size=50,
marker=".",
diag_kind="auto",
):
r"""
Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each dimension
in data will be shared in the y-axis across a single row and in the x-axis
across a single column.
The off-diagonal Axes show the pairwise relationships displayed as scatterplot.
The diagonal Axes show the univariate distribution of the data for that
dimension displayed as either a histogram or kernel density estimates (KDEs).
Read more in the :ref:`tutorials <plot_tutorials>`
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
labels : array-like or list, shape (n_samples), optional
Labels that correspond to each sample in X.
col_names : array-like or list, shape (n_features), optional
Names or labels for each feature in X. If not provided, the default
will be `Dimension 1, Dimension 2, etc`.
title : str, optional, default: None
Title of plot.
legend_name : str, optional, default: None
Title of the legend.
variables : list of variable names, optional
Variables to plot based on col_names, otherwise use every column with
a numeric datatype.
height : scalar, optional, default: 2.5
Height of each facet in inches.
context : None, or one of {paper, notebook, talk (default), poster}
The name of a preconfigured set.
font_scale : float, optional, default: 1
Separate scaling factor to independently scale the size of the font
elements.
palette : str, dict, optional, default: 'Set1'
Set of colors for mapping the ``hue`` variable. If a dict, keys should
be values in the hue variable.
alpha : float, optional, default: 0.7
Opacity value of plotted markers, between 0 and 1.
size : float or int, optional, default: 50
Size of plotted markers.
marker : string, optional, default: '.'
Matplotlib style marker specification
https://matplotlib.org/api/markers_api.html
"""
_check_common_inputs(
height=height,
title=title,
context=context,
font_scale=font_scale,
legend_name=legend_name,
)
# Handle X
if not isinstance(X, (list, np.ndarray)):
msg = "X must be array-like, not {}.".format(type(X))
raise TypeError(msg)
# Handle labels
if labels is not None:
if not isinstance(labels, (list, np.ndarray)):
msg = "labels must be array-like or list, not {}.".format(type(labels))
raise TypeError(msg)
elif X.shape[0] != len(labels):
msg = "Expected length {}, but got length {} instead for labels.".format(
X.shape[0], len(labels)
)
raise ValueError(msg)
# Handle col_names
if col_names is None:
col_names = ["Dimension {}".format(i) for i in range(1, X.shape[1] + 1)]
elif not isinstance(col_names, list):
msg = "col_names must be a list, not {}.".format(type(col_names))
raise TypeError(msg)
elif X.shape[1] != len(col_names):
msg = "Expected length {}, but got length {} instead for col_names.".format(
X.shape[1], len(col_names)
)
raise ValueError(msg)
# Handle variables
if variables is not None:
if len(variables) > len(col_names):
msg = "variables cannot contain more elements than col_names."
raise ValueError(msg)
else:
for v in variables:
if v not in col_names:
msg = "{} is not a valid key.".format(v)
raise KeyError(msg)
else:
variables = col_names
df = pd.DataFrame(X, columns=col_names)
if labels is not None:
if legend_name is None:
legend_name = "Type"
df_labels = pd.DataFrame(labels, columns=[legend_name])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from logbook import Logger
import pandas as pd
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR,
MARKETS_CLOSED
)
log = Logger('Realtime Clock')
class RealtimeClock(object):
"""
Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
The key difference between the two is that the RealtimeClock's event
emission is synchronized to the (broker's) wall time clock, while
MinuteSimulationClock yields a new event on every iteration (regardless of
wall clock).
The :param:`time_skew` parameter represents the time difference between
the Broker and the live trading machine's clock.
"""
def __init__(self,
sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission,
time_skew=pd.Timedelta("0s"),
is_broker_alive=None,
execution_id=None,
stop_execution_callback=None):
today = pd.to_datetime('now', utc=True).date()
beginning_of_today = pd.to_datetime(today, utc=True)
self.sessions = sessions[(beginning_of_today <= sessions)]
self.execution_opens = execution_opens[(beginning_of_today <= execution_opens)]
self.execution_closes = execution_closes[(beginning_of_today <= execution_closes)]
self.before_trading_start_minutes = before_trading_start_minutes[
(beginning_of_today <= before_trading_start_minutes)]
self.minute_emission = minute_emission
self.time_skew = time_skew
self.is_broker_alive = is_broker_alive or (lambda: True)
self._last_emit = None
self._before_trading_start_bar_yielded = False
self._execution_id = execution_id
self._stop_execution_callback = stop_execution_callback
def __iter__(self):
# yield from self.work_when_out_of_trading_hours()
# return
if not len(self.sessions):
return
for index, session in enumerate(self.sessions):
self._before_trading_start_bar_yielded = False
yield session, SESSION_START
if self._stop_execution_callback:
if self._stop_execution_callback(self._execution_id):
break
while self.is_broker_alive():
if self._stop_execution_callback: # put it here too, to break inner loop as well
if self._stop_execution_callback(self._execution_id):
break
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
if (server_time >= self.before_trading_start_minutes[index] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True
yield server_time, BEFORE_TRADING_START_BAR
elif (server_time < self.execution_opens[index].tz_localize('UTC') and index == 0) or \
(self.execution_closes[index - 1].tz_localize('UTC') <= server_time <
self.execution_opens[index].tz_localize('UTC')):
# sleep anywhere between yesterday's close and today's open
sleep(1)
# self._last_emit = server_time
# yield server_time, MARKETS_CLOSED
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, MARKETS_CLOSED
# if self.minute_emission:
# yield server_time, MINUTE_END
else:
sleep(1)
elif (self.execution_opens[index].tz_localize('UTC') <= server_time <
self.execution_closes[index].tz_localize('UTC')):
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
else:
sleep(1)
elif server_time == self.execution_closes[index].tz_localize('UTC'):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
yield server_time, SESSION_END
break
elif server_time > self.execution_closes[index].tz_localize('UTC'):
break
else:
# We should never end up in this branch
raise RuntimeError("Invalid state in RealtimeClock")
def work_when_out_of_trading_hours(self):
"""
a debugging method to work while outside trading hours, so we are still able to make the engine work
:return:
"""
from datetime import timedelta
num_days = 5
from trading_calendars import get_calendar
self.sessions = get_calendar("NYSE").sessions_in_range(
str(pd.to_datetime('now', utc=True).date() - timedelta(days=num_days * 2)),
str(pd.to_datetime('now', utc=True).date()))
import enum
import numpy as np
import pandas as pd
from implicit.als import AlternatingLeastSquares
from functools import partial
from itertools import zip_longest
from sklearn.preprocessing import StandardScaler
from sklearn.base import TransformerMixin
from typing import List
# these must be initialized
userid_to_id = {}
itemid_to_id = {}
id_to_userid = {}
id_to_itemid = {}
class BColor(enum.Enum):
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def cprint(msg, color: BColor):
print(f'{color}{msg}{BColor.ENDC}')
def popularity_measure(source, fields: List[str], k=5000, beta: List[float] = None, add_target=None, scaler=None):
""" ะ ะฐััะตั ะพัะตะฝะบะธ ะฒะฐะถะฝะพััะธ ัะพะฒะฐัะฐ ะฒ ะฟะพะบัะฟะบะต ะธ ะพัะฑะพั ัะพะฟ K ะฝะฐะธะฑะพะปะตะต ะฟะพะฟัะปััะฝัั
ัะพะฒะฐัะพะฒ
:param source - ะธัั
ะพะดะฝัะต ะดะฐะฝะฝัะต
:param fields - ะฟัะธะทะฝะฐะบะธ, ะฟะพ ะบะพัะพััะผ ะธะทะผะตััะตััั ะผะตัะฐ ะฒะฐะถะฝะพััะธ ัะพะฒะฐัะฐ
:param k - ะบะพะปะธัะตััะฒะพ ัะพะฒะฐัะพะฒ, ะพัะฑะธัะฐะตะผัั
ะฒ ัะพะฟ
:param beta - ะผะฝะพะถะธัะตะปะธ ะทะฝะฐัะธะผะพััะธ ะดะปั ะบะฐะถะดะพะณะพ ะฟัะธะทะฝะฐะบะฐ ะฒ ะพัะตะฝะบะต
:param add_target - ะฝะฐะทะฒะฐะฝะธะต ัะธะฝะฐะปัะฝะพะณะพ ะฟัะธะทะฝะฐะบะฐ. ะัะธะทะฝะฐะบ ะฝะต ะดะพะฑะฐะฒะปัะตััั, ะตัะปะธ target = None
:param scaler - ะบะปะฐัั ะผะฐัััะฐะฑะธัะพะฒะฐะฝะธั ะดะฐะฝะฝัั
"""
b = [1.] * len(fields) if beta is None else np.array(beta)
assert len(fields) == len(b), '`fields` and `beta` dimensions must equal'
assert scaler is None or issubclass(scaler, TransformerMixin), 'scaler must be a subclass of TransformerMixin'
_df = source[['item_id']].copy()
prepared = scaler().fit_transform(source[fields]) * b if scaler else source[fields] * b
values = np.linalg.norm(prepared, ord=2, axis=1)
_df['popularity'] = values
if add_target:
source.loc[:, add_target] = values
popularity = _df.groupby('item_id')['popularity'].sum()
return popularity.sort_values(ascending=False).head(k).index.tolist()
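# Illustrative call (not from the original code base; the purchases frame and
# its column values are hypothetical, StandardScaler is the sklearn scaler
# imported above):
#
#     purchases = pd.DataFrame({'item_id': [1, 1, 2, 3],
#                               'quantity': [2, 1, 5, 1],
#                               'sales_value': [3.0, 1.5, 10.0, 0.5]})
#     top_items = popularity_measure(purchases, ['quantity', 'sales_value'],
#                                    k=2, beta=[1.0, 0.5], scaler=StandardScaler)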
def check_model(uim, mdl_params, rec_params, res, ttl='als'):
"""
:param uim: user-item matrix
:param mdl_params: model init parameters
:param rec_params: recommendation parameters
:param res: true values, including user_id
:param ttl: model title
:return: predicted values (DataFrame)
"""
mdl = AlternatingLeastSquares(**mdl_params)
mdl.fit(uim.T, show_progress=False)
# rec_params['user_items'] = uim
res[ttl] = res['user_id'].apply(partial(recommender, mdl=mdl, params=rec_params))
return mdl
def recommender(user_id, mdl, params):
""" ะัะตะดัะบะฐะทะฐัะตะปั-ะธะฝัะตัะฟัะตัะฐัะพั """
uid = userid_to_id.get(user_id, None)
if uid is None:
return list()
rec_score = mdl.recommend(userid_to_id[user_id], **params)
return [id_to_itemid[rec[0]] for rec in rec_score]
def precision_at_k(recommended_list, bought_list, k=5):
""""""
flags = np.sum(np.isin(bought_list, recommended_list[:k]))
return flags / k
def ap_k(recommended_list, bought_list, k=5):
""""""
flags = np.isin(recommended_list, bought_list)
if np.sum(flags) == 0:
return 0
func = partial(precision_at_k, recommended_list, bought_list)
rel_items = np.arange(1, k + 1)[flags[:k]]
return np.sum(list(map(func, rel_items))) / np.sum(flags)
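# Worked example (hypothetical item ids, not from the original code base):
#
#     recommended = [143, 156, 1134, 991, 27]
#     bought = [143, 27, 991]
#     precision_at_k(recommended, bought, k=5)   # 3 hits in the top 5 -> 0.6
#     ap_k(recommended, bought, k=5)             # (1.0 + 0.5 + 0.6) / 3 = 0.7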
def calc_metric(metric_func, source: pd.DataFrame):
""" ะะพะดััะตั ะผะตััะธะบะธ
:param metric_func - ััะฝะบัะธั ะธะทะผะตัะตะฝะธั ะผะตััะธะบะธ. ะะตัะฒัะน ะฐัะณัะผะตะฝั - ัะตะบะพะผะตะฝะดะฐัะธะธ, ะฒัะพัะพะน - ะฐะบััะฐะปัะฝัะต ะทะฝะฐัะตะฝะธั
:param source - ะดะฐะฝะฝัะต ะดะปั ะฟะพะดััะตัะฐ ะผะตััะธะบะธ
"""
def metric_wrapper(pred, act):
return metric_func(pred, act) if len(pred) != 0 else 0
metric = pd.DataFrame()
for col in source.columns:
if col == 'user_id':
metric[col] = source[col]
elif col == 'actual':
continue
else:
metric[col] = source[[col, 'actual']].apply(lambda row: metric_wrapper(*row.values), axis=1)
return metric
def compare_metrics(res, saveto=None):
""" Build dataframe with metrics comparison """
pr_at_k = calc_metric(partial(precision_at_k, k=5), res)
ap_at_k = calc_metric(lambda pred, act: ap_k(pred, act, k=min(5, len(pred))), res)
smr = pd.DataFrame([pr_at_k.mean(), ap_at_k.mean()], index=['precision@k', 'map@k']).drop(columns='user_id')
if saveto:
smr.T.to_csv(saveto)
return smr
def get_nearest(mdl, elem_id, k, mode):
""" Get top K the nearest users/items to the given
:param mdl: ALS fitted model
:param elem_id: real user/item id
:param k: number of items to find
:param mode: users/items return switcher
:return: list of similar users/items depend on mode
"""
if (mode == 'user') or (mode == 0):
return [id_to_userid[idx] for idx, _ in mdl.similar_users(userid=userid_to_id[elem_id], N=k + 1)[1:]]
if (mode == 'item') or (mode == 1):
return [id_to_itemid[idx] for idx, _ in mdl.similar_items(itemid=itemid_to_id[elem_id], N=k + 1)[1:]]
return []
def filter_top_for_users(items, users, measure='popularity', k=5):
""" Get users top purchases
:param items: data grouped by users and items
:param users: user ids array
:param measure: ranging measure
:param k: number of items to find
:return ungrouped dataframe
"""
filter_mask = (items['user_id'].isin(users)) & (items['item_id'] != -1)
return items[filter_mask].sort_values(by=['user_id', measure], ascending=[True, False]).groupby('user_id').head(k)
def basic_filter(items, k, placeholder=()):
""" ะะท ัะฟะธัะบะฐ ัะพะฒะฐัะพะฒ ะฑะตัะตะผ K ะฟะตัะฒัั
, ะพัะปะธัะฝัะน ะพั ัะพะฒะฐัะฐ-ะทะฐะณะปััะบะธ, ะฐ ะตัะปะธ ัะฐะบะธั
ะฝะตั, ัะพ ะฒะพะทะฒัะฐัะฐะตะผ ะทะฐะณะปััะบั """
return result[:k] if (result := [item for item in items if item != -1]) else placeholder
def check_items_count(items, k):
""" Check number of predictions for each user
:param items: Series with users predictions. User ids must be in index
:param k: number of required predictions
:return: corrected predictions
"""
# if similar users made few purchases, there may not be enough recommendations
sizes = items.apply(len)
if (low_pred := items.index[sizes < k]).any():
cprint(f"Some users have less than {k} predictions!", BColor.WARNING)
print(low_pred.tolist())
# some handling of such situations goes here
if (nan_pred := items.index[sizes == 0]).any():
cprint(f"Some users have no predictions at all!", BColor.FAIL)
print(nan_pred.tolist())
# some handling of such situations goes here
return items
def agg_func(src):
""" ะะณะณัะตะณะฐัะพั ะฟะพั
ะพะถะธั
ัะพะฒะฐัะพะฒ: ะดะปั ะบะฐะถะดะพะณะพ ัะพะฒะฐัะฐ ะฑะตัะตะผ ะฒะตัั
ะฝะธะต ะฒ ะพัะตัะตะดะธ ะตัะปะธ ะพะฝะธ ะตัะต ะฝะต ะฒ ะฟะพะดะฑะพัะบะต """
arr = np.array(list(zip_longest(*src)), dtype='float')
res = []
for row in range(arr.shape[0]):
for col in range(arr.shape[1]):
if np.isnan(item := arr[row, col]):
continue
if item not in res:
res.append(item)
else:
for col_item in arr[row + 1:, col]:
if not np.isnan(col_item) and col_item not in res:
res.append(col_item)
break
return np.array(res, dtype='int')
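# Worked example of the aggregation order (hypothetical item ids):
#
#     agg_func([[10, 20, 30], [10, 40, 50]])
#     # the two queues are merged row by row; the duplicate 10 is replaced by
#     # the next unseen item from its queue -> array([10, 40, 20, 50, 30])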
def similar_item_recommend(mdl, users, data, measure='popularity', k=5,
filter_func=basic_filter, placeholder=(), title='similar_items'):
""" Recommend similar items based on top K purchases
:param mdl: ALS fitted model
:param users: user ids to recommend for
:param data: source dataset
:param measure: target field in the dataset
:param k: number of items to recommend
:param filter_func: additional filters like func(items: list) -> list
:param placeholder: value to use if no predictions available
:param title: name of target column
:return: list of predictions for given user
"""
# get each user's top purchases by user_id
group_items = data.groupby(['user_id', 'item_id'])[measure].sum().reset_index()
user_item_top = filter_top_for_users(group_items, users, measure, k)
# for each item in the user's top, find the K nearest items from the top-5k
user_item_top[title] = user_item_top['item_id'].apply(lambda x: get_nearest(mdl, x, k, 'item'))
# for each item, iteratively take its nearest neighbour if it is not already in the recommendation list,
preds = user_item_top.groupby('user_id')[title].agg(agg_func)
# the resulting lists can now be filtered further
# if no filter is given, take the first K items
preds = preds.apply(lambda val: filter_func(val, k, placeholder) if filter_func and callable(filter_func) else lambda x: x[:k])
# add users for whom predictions are missing
items = pd.Series([np.array(placeholder)] * len(users), index=users, name=title)
items.update(preds)
# check the number of predictions per user
items = check_items_count(items, k)
return items
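# Usage sketch, assuming the module-level id mappings (userid_to_id,
# id_to_itemid, ...) have already been filled and that `purchases` and
# `user_item_matrix` (both hypothetical names) hold the interaction data:
#
#     model = AlternatingLeastSquares(factors=64, regularization=0.05)
#     model.fit(user_item_matrix.T, show_progress=False)
#     preds = similar_item_recommend(model, users=[1, 2, 3], data=purchases,
#                                    measure='popularity', k=5, placeholder=(-1,))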
def similar_user_recommend(mdl, users, data, measure='popularity', k=5,
filter_func=basic_filter, placeholder=(), title='similar_users'):
""" Recommend items based on similar user purchases
:param mdl: ALS fitted model
:param users: user ids to recommend for
:param data: source dataset
:param measure: target field in the dataset
:param k: number of items to recommend
:param filter_func: additional filters like func(items: list) -> list
:param placeholder: value to use if no predictions available
:param title: name of target column
:return: list of predictions for given user
"""
# for each requested user, find the K nearest users
sim = pd.Series(users)
# -*- coding: utf-8 -*-
"""
Tests date parsing functionality for all of the
parsers defined in parsers.py
"""
from distutils.version import LooseVersion
from datetime import datetime
import pytest
import numpy as np
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas as pd
import pandas.io.parsers as parsers
import pandas.tseries.tools as tools
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, DatetimeIndex
from pandas import compat
from pandas.compat import parse_date, StringIO, lrange
from pandas.tseries.index import date_range
class ParseDatesTests(object):
def test_separator_date_conflict(self):
# Regression test for gh-4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-', parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from lifelines.datasets import load_waltons
from lifelines import KaplanMeierFitter
from scipy.stats import ranksums
from scipy import interpolate
from operator import add
import numpy as np
import pandas as pd
import os.path
import datetime
import requests
import glob
import json
import os
import warnings
warnings.filterwarnings('ignore')
summary_order = [
"_study_count",
"_subject_count",
"_demographic_count",
"_visit_count",
"_sample_count",
"_summary_lab_result_count",
"_treatment_count"
]
summary_count_headers = {
"_subject_count": "Cases",
"_study_count": "Studies",
"_demographic_count": "Demographic records",
"_visit_count": "Visit records",
"_sample_count": "Samples",
"_summary_lab_result_count": "Lab Results records",
"_treatment_count": "Drug records"
}
chunk = 50
class MetricsTable(dict):
''' Represent metrics tables in HTML format for visualization '''
def _repr_html_(self):
html = []
html.append("<table style>")
html.append("<thead>")
html.append("<th>Metric</th>")
html.append("<th>Value</th>")
html.append("</thead>")
for key in self:
html.append("<tr>")
html.append("<td>%s</td>" % key)
html.append("<td>%s</td>" % self[key])
html.append("<tr>")
html.append("</table>")
return ''.join(html)
def add_keys(filename):
''' Get auth from our secret keys '''
global auth
json_data = open(filename).read()
keys = json.loads(json_data)
auth = requests.post('https://aids.niaiddata.org/user/credentials/cdis/access_token', json=keys)
return auth
def get_keys():
''' Get auth from internal service '''
global auth
auth = requests.get('http://fence-service.default.svc.cluster.local/internal/access_token')
def query_api(query_txt, variables=None):
''' Request results for a specific query '''
auth = add_keys('/home/jovyan/pd/credentials.json')
if variables == None:
query = {'query': query_txt}
else:
query = {'query': query_txt, 'variables': variables}
output = requests.post('https://aids.niaiddata.org/api/v0/submission/graphql', headers={'Authorization': 'bearer ' + auth.json()['access_token']}, json=query).text
data = json.loads(output)
if 'errors' in data:
print(data)
return data
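# Example query (mirrors get_studies/get_projects below; the project_id value
# is a placeholder):
#
#     query_txt = '{ study(project_id: "ndh-test") { submitter_id } }'
#     data = query_api(query_txt)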
def get_studies(project_id):
''' Get list of studies for specific project'''
query_txt = """{ study(project_id: "%s"){ submitter_id }}""" % project_id
data = query_api(query_txt)
studies = []
for study in data['data']['study']:
studies.append(study['submitter_id'])
return studies
def get_projects():
''' Query list of projects '''
query_txt = """query Project { project(first:0) {project_id}} """
data = query_api(query_txt)
projects = []
for pr in data['data']['project']:
projects.append(pr['project_id'])
projects = sorted(projects)
return projects
def query_summary_counts(projects=None):
''' Query summary counts for each data type'''
if projects == None:
projects = get_projects()
elif not isinstance(projects,list):
projects = [projects]
dftotal = pd.DataFrame()
import pandas as pd
from pathlib import Path
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from mcw_readers.interfaces.lut import lut
from mcw_readers.interfaces.wb_parsers import wb_parser
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
def get_parser():
"""get cli parse"""
parser_desc = 'parse multiple neuroscores as defined by a spec file'
epilog = """
The spec file is a tsv. Each line represents a timepoint for a participant.
The timepoints must be matched with the exams (A, B, or C) in neuroscore.
Here are the tsv columns names and the information they represent:
record_id
The redcap record_id for the participant.
redcap_repeat_instance
The redcap repeat instance.
neuroscore
The full path to the Neuroscore xlsm file.
exam
The exam (A, B, or C) matching with the redcap_repeat_instance
"""
parser = ArgumentParser(description=parser_desc,
formatter_class=RawDescriptionHelpFormatter,
epilog=epilog)
parser.add_argument('--spec', action='store', required=True,
help='the tsv specification')
return parser
def main():
with pkg_resources.path('mcw_readers.data',
'epilepsy_lut.xlsx') as epilepsy_excel:
epilepsy_lut = lut('epilepsy', str(epilepsy_excel))
parser = get_parser()
args = parser.parse_args()
spec = pd.read_csv(args.spec, sep='\t')
spec.exam = spec.exam.str.lower()
spec['exam_num'] = spec.exam.map({'a': 1, 'b': 2})
N = spec.shape[0]
all_results = []
all_new_lines = []
all_missing_lines = []
for i, row in enumerate(spec.itertuples(), start=1):
print(f'Working on row {i} / {N}')
epilepsy_parser = wb_parser(row.neuroscore, verbose=False)
results, new_lines, missing_lines = epilepsy_parser.parse_data(
epilepsy_lut, row.exam_num)
# adjust results
results = pd.DataFrame(results)
import glob
import pandas as pd
COLUMNS_MAPPING = {
"Nยฐ de l'OM": "num_om",
"Mode de rรฉservation (online, offline)": "mode_reservation",
"Structure": "structure",
"Date de dรฉbut mission": "date_debut_mission",
"Date de fin mission": "date_fin_mission",
"Statut": "status",
"Lieu de dรฉpart": "lieu_depart",
"Lieu d'arrivรฉe": "lieu_arrivee",
"Type de prestation": "prestation_type",
"Lieu รฉtape": "lieu_etape",
"Nombre de prestations": "nombre_prestation",
"Nombre d'OM": "nombre_om",
"Nombre de jours": "nombre_jours",
"Coรปt des prestations": "cout",
}
REVERSE_COLUMNS_MAPPING = {v: k for k, v in COLUMNS_MAPPING.items()}
def get_data(dir_path, prestation_types=None):
data = load_data(dir_path)
data = clean_data(data, prestation_types)
return data
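# Example call (the directory path and prestation type values are placeholders;
# clean_data is expected to filter on them):
#
#     reporting = get_data("data/chorus_dt", prestation_types=["Avion", "Train"])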
def load_data(dir_path):
datas = []
for filepath in glob.glob(dir_path + "/Reportings_*.csv"):
dataset = pd.read_csv(filepath, delimiter=";")
datas.append(dataset)
data = pd.concat(datas, ignore_index=True)
return data
import sys
import ast
import xml.etree.ElementTree as ET
def parse_properties(fn):
et = ET.parse(fn)
test_cases = list(et.getroot().iter('testcase'))
all_properties = {}
for i, test_case in enumerate(test_cases):
name = '{file}::{name}'.format(**test_case.attrib)
properties = {
prop.attrib['name']: ast.literal_eval(prop.attrib['value'])
for prop in test_case.iter('property')
}
all_properties[name] = properties
fmt = '{:<150}' + '\t{}' * len(properties)
if i == 0:
print(fmt.format(name, *properties.keys()))
sorted_keys = list(sorted(properties.keys()))
if not sorted_keys:
sorted_keys = ['total_threads', 'dangling_threads',
'total_sockets', 'open_sockets']
print(fmt.format(name, *[properties.get(key, 'N/A')
for key in sorted_keys]))
return all_properties
if __name__ == '__main__':
properties = parse_properties(sys.argv[1])
try:
import pandas as pd
except ImportError:
...
else:
df = pd.DataFrame(properties)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements the global import of all data
Created on Mon Dec 26 20:51:08 2016
@author: rwilson
"""
import numpy as np
import glob
import re
import os
import csv
from itertools import repeat
import pandas as pd
import h5py
from dateutil.parser import parse
import codecs
from scipy.io import loadmat
from scipy import signal
import shelve
import pickle
class utilities:
'''Collection of functions intended for data related processes.
'''
def DB_group_names(Database, group_name = None):
'''Read in group names found within group. If group is not provided the
upper folder structure will be read from.
Parameters
----------
Database : str
Relative location of database
group_name : str
Name of the group to read from. If None, the group names at the root of the file are returned.
Returns
-------
group_names : list
Group names found within the group
notes
-----
Add some additional error checks
'''
with h5py.File(Database, 'r') as h5file:
if group_name is not None:
group = h5file.get(group_name)
group_names = [key for key in group.keys()]
else:
group_names = [key for key in h5file.keys()]
return group_names
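# Usage sketch (the file name is a placeholder):
#
#     groups = utilities.DB_group_names('experiment.h5')
#     sub_groups = utilities.DB_group_names('experiment.h5', group_name=groups[0])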
def DB_attrs_save(Database, dictionary):
'''Save attribute to database head.
Parameters
----------
Database : str
Relative location of database
dictionary : dict
Dictionary of attributes
notes
-----
Add some additional error checks
'''
print('* The following %s attributes will be updated' % Database)
with h5py.File(Database, 'r+') as h5file:
for key,item in zip(dictionary.keys(), dictionary.values()):
print('Key:', key,'| item:', item)
h5file.attrs[key] = item
def DB_attrs_load(Database, attrs_names):
'''Read attribute from database head.
Parameters
----------
Database : str
Relative location of database
attrs_names : list(str)
The expected attribute name/s
Returns
-------
dict_attri : dict
The returned dictionary of attribute/s from the database
notes
-----
Add some additional error checks
'''
dict_attrs = {}
with h5py.File(Database, 'r') as h5file:
for attrs_name in attrs_names:
# Load the database
attrs = h5file.attrs[attrs_name]
dict_attrs[attrs_name] = attrs
return dict_attrs
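# Round-trip sketch for the attribute helpers (file name and attribute values
# are placeholders):
#
#     utilities.DB_attrs_save('experiment.h5', {'operator': 'RW', 'sample': 'S01'})
#     attrs = utilities.DB_attrs_load('experiment.h5', ['operator', 'sample'])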
def DB_pd_data_load(Database, group, cols = None, whereList = None):
'''Loads in a pandas dataframe stored in group from the Database.
Parameters
----------
Database : str
Relative location of database
group : str
The expected group name
cols : list(str) / list(int)
If not None, will limit the return columns, only applicable for ``table``
format database. For ``fixed`` format database only int accepted
whereList : list of Term (or convertable) objects or slice(from, to)
The conditional import of data, example ['index>11', 'index<20'],
only applicable for ``table`` format database. For ``fixed`` format
database only a slice object is applicable and will use the row index
numbers not the index values (i.e. df.iloc vs df.loc)
Returns
-------
group_df : DataFrame
The data stored in ``group`` as a pandas dataframe
Examples
--------
TSsurvey = pd.read_hdf(h5file, 'survey20180312093545',
columns=[(1,1), (1,2)], # Load specific columns
where = ['index>11', 'index<20']) # Load index 11 -> 20
'''
with pd.HDFStore(Database, 'r+') as h5file:
# Check that the expected group name is found in the database
# group_names = [key for key in h5file.keys()]
# expected_group_name = '/'+group
# if expected_group_name not in group_names:
# raise KeyError('The %s group was not found within the %s database.' \
# %(expected_group_name, Database))
# Load the database
try:
group_df = pd.read_hdf(h5file, group, columns = cols, where = whereList)
except TypeError:
with pd.HDFStore(Database, 'r+') as h5file:
group_df = pd.read_hdf(h5file, group)
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import pandas as pd
from keras import utils, callbacks
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Flatten, Embedding, Dropout, Concatenate, Dot,Reshape,Merge
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, SGD, RMSprop,Adamax
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import matplotlib.pyplot as plt
import math
from sklearn import preprocessing
from sklearn.ensemble import ExtraTreesClassifier,ExtraTreesRegressor,RandomForestClassifier,RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
nbatch_size = 512
def rebuild_data():
"""
ๆธ
ๆด้ๆฉ็นๅพๆฐๆฎ
"""
user_header = ['user_id','gender', 'age', 'job']
user_df = pd.read_csv('./data/ml-1m/users.dat', sep='::', names=user_header, usecols=[0, 1, 2, 3], engine = 'python')
user_df.set_index(['user_id'], inplace = False)
movie_header = ['movie_id', 'title','category']
movie_df = pd.read_csv('./data/ml-1m/movies.dat', sep='::', names=movie_header, usecols=[0, 1, 2], engine = 'python')
movie_df.set_index(['movie_id'], inplace = False)
rating_header = ['user_id', 'movie_id', 'rating', 'timestamp']
rating_df = pd.read_csv('./data/ml-1m/ratings.dat',sep='::', names=rating_header, engine = 'python')[:100000]
rating_user = [user_df[user_df['user_id'] == uid].values[0] for uid, mid, r, _ in rating_df.values]
rating_movie = [movie_df[movie_df['movie_id'] == mid].values[0] for uid, mid, r, _ in rating_df.values]
user_df = pd.DataFrame(rating_user, index=None, columns=['user_id', 'gender', 'age', 'job'])
movie_df = pd.DataFrame(rating_movie, index=None, columns=['movie_id', 'title', 'category'])
rating_df = rating_df.rating
pd.to_pickle(user_df, './data/ml-1m/user_pick')
pd.to_pickle(movie_df, './data/ml-1m/movie_pick')
pd.to_pickle(rating_df, './data/ml-1m/rating_pick')
print(user_df.shape,movie_df.shape,rating_df.shape)
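# Note: the per-row lookups in rebuild_data above could also be written as vectorized merges
# (an equivalent sketch, assuming the 'user_id'/'movie_id' columns loaded above):
#     user_df = rating_df.merge(user_df, on='user_id', how='left')[['user_id', 'gender', 'age', 'job']]
#     movie_df = rating_df.merge(movie_df, on='movie_id', how='left')[['movie_id', 'title', 'category']]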
def load_data():
"""
Load the pickled data.
"""
user_df= pd.read_pickle('./data/ml-1m/user_pick')
movie_df= pd.read_pickle('./data/ml-1m/movie_pick')
rating_df= pd.read_pickle('./data/ml-1m/rating_pick')
for f in user_df.columns:
if user_df[f].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(user_df[f].values))
user_df[f] = lbl.transform(list(user_df[f].values))
movie_df= | pd.read_pickle('./data/ml-1m/movie_pick') | pandas.read_pickle |
"""
creates triple leveraged simulated datasets going back to the earliest dates for indices
currently does this for QQQ and SPY
Before running this, you should be in an environment with zipline installed.
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import trimboth
from sklearn.ensemble import RandomForestRegressor
# location for saving data
DATADIR = 'eod_data/simulated/'
def check_correlations(df1, df2, plot=False):
"""
checks for correlations between two dataframes;
both must have a 'daily_pct_chg' column
this can be used for checking that a 2 or 3x etf is properly correlated with the underlying asset
also gets the multiplication factor and standard deviation
"""
# check correlation to make sure it's high
both = pd.concat([df1['daily_pct_chg'], df2['daily_pct_chg']], axis=1).dropna()
both.columns = ['regular', 'leveraged']
if plot:
corr = both.corr() # usually around 99.9 or 99.8
both.plot.scatter(x='regular', y='leveraged')
plt.title('correlation: ' + str(round(corr.iloc[0, 1], 4)))
plt.show()
# look at distribution of TQQQ return multiples
t = (both['leveraged'] / both['regular']).fillna(0).to_frame()
t[np.isinf(t[0])] = 0
# exclude outliers, which throw off the mean
# removes right and leftmost 5% of quantiles
new_t = trimboth(t[0], 0.05)
# t = t[t[0] < t[0].quantile(0.9)]
# some large outliers
# t[(t < 6) & (t > -6)].hist(bins=50)
if plot:
plt.hist(new_t, bins=50)
plt.show()
print('mean and std for multiples:')
avg, std = new_t.mean(), new_t.std()
print(avg)
print(std)
return avg
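if __name__ == '__main__':
    # Minimal self-check sketch (synthetic data, not part of the original pipeline; reuses the
    # module-level numpy/pandas imports above): a perfectly 3x-levered return series should
    # give a multiple very close to 3.
    _rng = np.random.default_rng(0)
    _base = pd.DataFrame({'daily_pct_chg': _rng.normal(0, 0.01, 250)})
    _lev = pd.DataFrame({'daily_pct_chg': 3 * _base['daily_pct_chg']})
    assert abs(check_correlations(_base, _lev) - 3) < 1e-6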
def simulate_leveraged(stocks, etf_names=['QQQ', 'TQQQ', 'SQQQ'], return_dfs=False, write=True):
"""
creates 3x and 2x leveraged ETFS for historical data
QQQ, SPY, todo: DJI
if write, writes to /home/nate/Dropbox/data/eod_data/
stocks is dictionary of dataframes
etf_names is list of 3 etfs: 1 that is the base (unleveraged) ETF, then 1 that is the positive etf, then one that is the negative leveraged ETF
if return_dfs is True, will return the simulated dfs
if write is True, writes simulated dfs to file
"""
normal = stocks[etf_names[0]].copy()
pos = stocks[etf_names[1]].copy()
neg = stocks[etf_names[2]].copy()
pos_sim, neg_sim = create_simulated_leveraged(normal, pos, neg)
# change columns for zipline format
normal.reset_index(inplace=True)
# Zipline will adjust the prices for splits and dividends I think, but better to just use
# Quandl-adjusted prices
normal = normal[['Date', 'Adj_Open', 'Adj_High', 'Adj_Low', 'Adj_Close', 'Adj_Volume', 'Dividend', 'Split']]
normal.columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'dividend', 'split']
normal['dividend'] = 0
normal['split'] = 0
if write:
normal.to_csv(DATADIR + etf_names[0] + '.csv', index=False)
pos_sim.to_csv(DATADIR + etf_names[1] + '.csv', index=False)
neg_sim.to_csv(DATADIR + etf_names[2] + '.csv', index=False)
if return_dfs:
return normal, pos_sim, neg_sim
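# Hypothetical call sketch (ticker keys, column names and file layout are assumptions based on
# the code above, not taken from elsewhere in this project):
#     stocks = {t: pd.read_csv('eod_data/{}.csv'.format(t), index_col='Date', parse_dates=True)
#               for t in ['QQQ', 'TQQQ', 'SQQQ']}
#     simulate_leveraged(stocks, etf_names=['QQQ', 'TQQQ', 'SQQQ'], write=True)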
def create_simulated_leveraged(df, pos_lev_df, neg_lev_df):
# get max and min from untouched pos/neg leveraged dfs for
# price adjusting later
pos_max = pos_lev_df['Close'].max()
pos_min = pos_lev_df['Close'].min()
neg_max = neg_lev_df['Close'].max()
neg_min = neg_lev_df['Close'].min()
# TODO: need to add in splits to keep price between 40 and 180 for TQQQ, in between 10 and 80 for SQQQ
# usually about 2.97 for a 3x etf
df['daily_pct_chg'] = df['Adj_Close'].pct_change()
pos_lev_df['daily_pct_chg'] = pos_lev_df['Adj_Close'].pct_change()
neg_lev_df['daily_pct_chg'] = neg_lev_df['Adj_Close'].pct_change()
# get average multipliers for leveraged etfs
pos_mult = check_correlations(df, pos_lev_df)
neg_mult = check_correlations(df, neg_lev_df)
# get earliest dates for leveraged etfs
earliest_pos = pos_lev_df.index.min()
earliest_neg = neg_lev_df.index.min()
# make dataframes to hold simulated data
pos_sim = df.loc[:earliest_pos].copy()
neg_sim = df.loc[:earliest_neg].copy()
# simulate leveraged from beginning of normal ETF timeframe and calculate returns
col = 'Adj_Close' # only works with Adj_Close for now
# also need to set 'Close' column for calculating splits, etc
pos_lev_df['daily_pct_chg'] = pos_lev_df[col].pct_change()
neg_lev_df['daily_pct_chg'] = neg_lev_df[col].pct_change()
pos_sim.loc[:,
'daily_pct_chg'] *= pos_mult # multiply the original ETF by the average multiple to get leveraged amount
# back-calculate the adjusted close
pos_sim.loc[pos_sim.index[1]:, col] = ((pos_sim['daily_pct_chg'] + 1).cumprod() * pos_sim.iloc[0][col])[1:]
neg_sim.loc[:, 'daily_pct_chg'] *= neg_mult
neg_sim.loc[neg_sim.index[1]:, col] = ((neg_sim['daily_pct_chg'] + 1).cumprod() * neg_sim.iloc[0][col])[1:]
# adjust to match latest tqqq price
pos_sim['Close'] = pos_sim['Adj_Close']
neg_sim['Close'] = neg_sim['Adj_Close']
ratio = pos_lev_df.iloc[0]['Close'] / pos_sim.iloc[-1]['Close']
pos_sim['Close'] *= ratio
ratio = pos_lev_df.iloc[0]['Adj_Close'] / pos_sim.iloc[-1]['Adj_Close']
pos_sim['Adj_Close'] *= ratio
# adjust to neg leverage price
ratio = neg_lev_df.iloc[0]['Close'] / neg_sim.iloc[-1]['Close']
neg_sim['Close'] *= ratio
ratio = neg_lev_df.iloc[0]['Adj_Close'] / neg_sim.iloc[-1]['Adj_Close']
neg_sim['Adj_Close'] *= ratio
pos_sim['Split'] = 1
neg_sim['Split'] = 1
pos_sim['Dividend'] = 0
neg_sim['Dividend'] = 0
# contain prices between 40 and 180 for positive leverage, 10 and 80 for negative
low_adj = 0.25 # noticed from SQQQ
high_adj = 2 # noticed from TQQQ
# go backwards thru dataset, since the latest price matches the latest price in the real data
# also start at the 2nd-to-last point, because the last one overlaps the actual data and can't be adjusted
# problem with getting infinity for earliest very large values...just set splits, then do a calculation based on the compound splits
total_split = 1
pos_splits = []
for i, r in pos_sim.iloc[-1::-1].iterrows():
# adjust to total split adjustment to this point
r['Close'] /= total_split
pos_sim.loc[i, 'Close'] = r['Close']
if r['Close'] < pos_min:
# print('less')
r['Split'] = low_adj
pos_splits.append(low_adj)
elif r['Close'] > pos_max:
# print('more')
r['Split'] = high_adj
pos_splits.append(high_adj)
else:
pos_splits.append(1)
total_split *= r['Split']
# TODO: fix issue where first price is not within range
low_adj = 0.25 # noticed from SQQQ
high_adj = 2 # noticed from TQQQ
total_split = 1
neg_splits = []
# doesn't work with .iloc[1:-1:-1]
for i, r in neg_sim.iloc[-1::-1].iterrows():
r['Close'] /= total_split
neg_sim.loc[i, 'Close'] = r['Close']
if r['Close'] < neg_min:
r['Split'] = low_adj
neg_splits.append(low_adj)
elif r['Close'] > neg_max:
r['Split'] = high_adj
neg_splits.append(high_adj)
else:
neg_splits.append(1)
total_split *= r['Split']
pos_sim['Ticker'] = pos_lev_df['Ticker'][0]
neg_sim['Ticker'] = neg_lev_df['Ticker'][0]
pos_sim['Split'] = pos_splits
neg_sim['Split'] = neg_splits
pos_sim['Dividend'] = 0
neg_sim['Dividend'] = 0
pos_sim_full = pd.concat([pos_sim.iloc[:-1], pos_lev_df])
neg_sim_full = | pd.concat([neg_sim.iloc[:-1], neg_lev_df]) | pandas.concat |
import pandas as pd
from genomics_data_index.api.query.GenomicsDataIndex import GenomicsDataIndex
from genomics_data_index.api.query.features.MutationFeaturesComparator import MutationFeaturesComparator
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.model.db import Sample
from genomics_data_index.test.integration import snippy_all_dataframes
def test_summary_all(loaded_database_genomic_data_store: GenomicsDataIndex):
db = loaded_database_genomic_data_store.connection.database
all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}
dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\t')
dfB = pd.read_csv(snippy_all_dataframes['SampleB'], sep='\t')
dfC = pd.read_csv(snippy_all_dataframes['SampleC'], sep='\t')
expected_df = pd.concat([dfA, dfB, dfC])
expected_df = expected_df.groupby('Mutation').agg({
'Sequence': 'first',
'Position': 'first',
'Deletion': 'first',
'Insertion': 'first',
'Mutation': 'count',
}).rename(columns={'Mutation': 'Count'}).sort_index()
expected_df['Total'] = 9
expected_df['Percent'] = 100 * (expected_df['Count'] / expected_df['Total'])
present_set = SampleSet(all_sample_ids)
mutations_summarizer = MutationFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
ignore_annotations=True)
mutations_df = mutations_summarizer.summary(present_set)
mutations_df['Percent'] = mutations_df['Percent'].astype(int) # Convert to int for easier comparison
mutations_df = mutations_df.sort_index()
assert len(expected_df) == len(mutations_df)
assert list(expected_df.columns) == list(mutations_df.columns)
assert list(expected_df.index) == list(mutations_df.index)
assert list(expected_df['Count']) == list(mutations_df['Count'])
assert list(expected_df['Total']) == list(mutations_df['Total'])
assert 22 == mutations_df.loc['reference:619:G:C', 'Percent']
def test_summary_unique(loaded_database_genomic_data_store: GenomicsDataIndex):
db = loaded_database_genomic_data_store.connection.database
sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}
mutations_summarizer = MutationFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
ignore_annotations=True)
dfA = pd.read_csv(snippy_all_dataframes['SampleA'], sep='\t')
dfB = | pd.read_csv(snippy_all_dataframes['SampleB'], sep='\t') | pandas.read_csv |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-0.005 * x))
def sigmoid_derivative(x):
return 0.005 * x * (1 - x)
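# Note: as written, sigmoid_derivative takes the sigmoid *output* s as its argument, since
# d/dz [1 / (1 + exp(-0.005*z))] = 0.005 * s * (1 - s) with s = sigmoid(z).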
def read_and_divide_into_train_and_test(csv_file):
# Reading csv file here
df = pd.read_csv(csv_file)
# Dropping unnecessary column
df.drop(['Code_number'], axis=1, inplace=True)
# Replacing missing values in the Bare Nuclei column with mean of rest of the values
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
mean_missing = int(round(df['Bare_Nuclei'].mean()))
df['Bare_Nuclei'] = df['Bare_Nuclei'].replace(np.NaN, mean_missing).astype(int)
# Splitting dataframe into testing and training dataframes
training_df = df.sample(frac=0.8, random_state=0)
test_df = df.drop(training_df.index)
training_inputs = training_df.iloc[:, :-1]
training_labels = training_df.iloc[:, -1]
test_inputs = test_df.iloc[:, :-1]
test_labels = test_df.iloc[:, -1]
# Creating the correlation heatmap of the dataframe
df.drop(['Class'], axis=1, inplace=True)
correlation = df.corr()
plt.figure(figsize=(10, 10))
heatmap = plt.imshow(correlation, cmap='hot')
plt.xticks(range(len(correlation)), correlation.columns, rotation=90)
plt.yticks(range(len(correlation)), correlation.columns)
for i in range(len(correlation)):
for j in range(len(correlation)):
if round(correlation.iloc[i, j], 2) > .5:
color = 'k'
else:
color = 'w'
plt.text(j, i, round(correlation.iloc[i, j], 2),
ha="center", va="center", color=color)
plt.fill()
plt.colorbar(heatmap)
print("Please close the heatmap to continue...")
plt.show()
plt.close()
return training_inputs, training_labels, test_inputs, test_labels
def run_on_test_set(test_inputs, test_labels, weights):
test_output = sigmoid(test_inputs.dot(weights))
tp = 0
test_predictions = []
for i, j in test_output.iterrows():
j = float(j)
if j < 0.5:
test_predictions.append(0)
elif j >= 0.5:
test_predictions.append(1)
for predicted_val, label in zip(test_predictions, test_labels):
if predicted_val == label:
tp += 1
# accuracy = tp_count / total number of samples
accuracy = tp / len(test_labels)
return accuracy
def plot_loss_accuracy(accuracy_array, loss_array):
fig, (ax1, ax2) = plt.subplots(2)
ax1.plot(accuracy_array)
ax1.set_title('Accuracy')
ax2.plot(loss_array)
ax2.set_title('Loss')
plt.show()
def main():
csv_file = './breast-cancer-wisconsin.csv'
| pd.read_csv(csv_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 12:09:05 2018
Initial fitness: 0.26; per operating condition: 1.012
@author: cwktu
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import subprocess
from math import sqrt
import os
##%% Find the point of maximum slope on the OH curve (i.e. the ignition point)
def combustion_time(data, m):
    """Return the maximum slope of the OH mole-fraction curve of solution #m and the index where it occurs."""
    # m selects the solution number used in the CKSoln CSV column names
    time_col = 'Time_Soln#{}_(sec)'.format(m)
    oh_col = ' Mole_fraction_OH_Soln#{}_()'.format(m)
    slope_max = 0
    time = 0
    for i in range(len(data)-1):
        timex1 = data[time_col][i]
        timex2 = data[time_col][i+1]
        tempy1 = data[oh_col][i]
        tempy2 = data[oh_col][i+1]
        k = (tempy2-tempy1)/(timex2-timex1)
        if k > slope_max:
            slope_max = k
            time = i
    return slope_max, time
def error(a,b):
error=[]
for i in range(len(a)):
error.append(abs((a[i]-b[i])/b[i]))
me = sum(error)/len(error)
return me
def Mean_squared_error(a,b):
error=[]
for i in range(len(a)):
error.append((a[i]-b[i])*(a[i]-b[i]))
mse = sum(error)/len(error)
rmse = sqrt(mse)
return rmse
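# Quick sanity check for the two error metrics above (illustrative values only):
#     error([1.1, 2.2], [1.0, 2.0])              -> ~0.1   (mean absolute relative error)
#     Mean_squared_error([1.1, 2.2], [1.0, 2.0]) -> ~0.158 (root of the mean squared error)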
def mechanism_computation(path):
data1 = pd.read_csv(path+"/CKSoln_solution_no_1.csv")
data2 = pd.read_csv(path+"/CKSoln_solution_no_2.csv")
data3 = pd.read_csv(path+"/CKSoln_solution_no_3.csv")
data4 = pd.read_csv(path+"/CKSoln_solution_no_4.csv")
data5 = pd.read_csv(path+"/CKSoln_solution_no_5.csv")
data6 = pd.read_csv(path+"/CKSoln_solution_no_6.csv")
data7 = pd.read_csv(path+"/CKSoln_solution_no_7.csv")
data8 = | pd.read_csv(path+"/CKSoln_solution_no_8.csv") | pandas.read_csv |
import heapq
import pandas as pd
class huff:
def __init__(self,WordList=[],inputF=0):
self.inputF=inputF
## if inputF is 1, WordList comes in as a pandas Series: convert it to a list, and return the encoded result as a pandas Series
if self.inputF == 1:
self.WL=WordList.tolist()
else :
self.WL = WordList
self.frekans=self.calculateFreq(self.WL)
self.key=self.createTree(self.frekans) #list form
self.keyDict=self.keytoDict(self.key)
def calculateFreq(self,WordList):
frekans={}
for i in WordList:
for k in i:
if k in frekans:
frekans[k]+=1
else:
frekans[k]=1
return frekans
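# Example (illustrative): calculateFreq(["ab", "b"]) returns {'a': 1, 'b': 2},
# i.e. character counts over all words, which feed the Huffman tree construction.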
def encode(self,WordList,keyDict):
encoded=[]
for i in WordList:
stri=""
for j in i:
stri+=keyDict[j]
encoded.append(stri)
if self.inputF == 0:
return encoded
if self.inputF == 1:
return | pd.Series(encoded) | pandas.Series |
import numpy as np
import pandas as pd
import json
import random
from matplotlib import pyplot as plt
from ai4netmon.Analysis.bias import bias_utils as bu
from ai4netmon.Analysis.bias import radar_chart
print('####### Example 2 - bias in RIPE monitors')
## datasets
AGGREGATE_DATA_FNAME = '../../data/aggregate_data/asn_aggregate_data_20211201.csv'
RIPE_RIS_FNAME = '../../data/misc/RIPE_RIS_peers_ip2asn.json'
## features
CATEGORICAL_FEATURES = ['AS_rank_source', 'AS_rank_iso', 'AS_rank_continent', 'is_personal_AS', 'peeringDB_info_ratio',
'peeringDB_info_traffic', 'peeringDB_info_scope', 'peeringDB_info_type', 'peeringDB_policy_general']
NUMERICAL_FEATURES = ['AS_rank_numberAsns', 'AS_rank_numberPrefixes', 'AS_rank_numberAddresses', 'AS_rank_total',
'AS_rank_customer', 'AS_rank_peer', 'AS_rank_provider', 'peeringDB_info_prefixes4', 'peeringDB_info_prefixes6',
'peeringDB_ix_count', 'peeringDB_fac_count', 'AS_hegemony']
FEATURES = CATEGORICAL_FEATURES+NUMERICAL_FEATURES
## useful methods
def get_feature_type(feature):
if feature in CATEGORICAL_FEATURES:
data_type = 'categorical'
elif feature in NUMERICAL_FEATURES:
data_type = 'numerical'
else:
raise ValueError
return data_type
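# e.g. get_feature_type('AS_rank_iso') -> 'categorical', get_feature_type('AS_hegemony') -> 'numerical'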
## load data
df = | pd.read_csv(AGGREGATE_DATA_FNAME, header=0, index_col=0) | pandas.read_csv |
"""Epochs analysis module.
The functions in the module use local normalization to compute the returns, the
normalized returns and the correlation matrix of financial time series.
This script requires the following modules:
* itertools
* math
* multiprocessing
* pickle
* typing
* numpy
* pandas
* epochs_tools
The module contains the following functions:
* returns_data - computes the returns of the time series.
* epochs_volatility_data - uses local normalization to compute the
volatility of the time series.
* epochs_normalized_returns_data - uses rolling normalization to normalize
the returns of the time series.
* epochs_correlation_matrix_data - uses local normalization to compute the
correlation matrix of the normalized returns.
* epochs_aggregated_dist_returns_pair_data - uses local normalization to
compute the aggregated distribution of returns for a pair of stocks.
* epochs_aggregated_dist_returns_market_data - uses local normalization to
compute the aggregated distribution of returns for a market.
* main - the main function of the script.
..moduleauthor:: <NAME> <www.github.com/juanhenao21>
"""
# -----------------------------------------------------------------------------
# Modules
from itertools import product as iprod
from itertools import combinations as icomb
import math
import multiprocessing as mp
import pickle
from typing import Any, Iterable, List, Tuple
import numpy as np # type: ignore
import pandas as pd # type: ignore
import epochs_tools
# -----------------------------------------------------------------------------
def returns_data(dates: List[str], time_step: str) -> None:
"""Computes the returns of the time series.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01-01', '2020-12-01']).
:param time_step: time step of the data (i.e. '1m', '1h', '1d', 'wk',
'1mo').
:return: None -- The function saves the data in a file and does not return
a value.
"""
function_name: str = returns_data.__name__
epochs_tools.function_header_print_data(function_name, dates, time_step, "", "")
try:
# Load data
data: pd.DataFrame = pickle.load(
open(
f"../data/original_data/original_data_{dates[0]}_{dates[1]}_step"
+ f"_{time_step}.pickle",
"rb",
)
)
returns_df: pd.DataFrame = data.pct_change().dropna()
returns_df = returns_df.iloc[:, :200]
# Saving data
epochs_tools.save_data(returns_df, function_name, dates, time_step, "", "")
except FileNotFoundError as error:
print("No data")
print(error)
print()
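# Note on the return definition used above (illustrative numbers only): pct_change computes
# r_t = p_t / p_{t-1} - 1 column-wise, so a price column [100, 101, 99] yields returns of
# 0.01 and approximately -0.0198 after dropna().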
# -----------------------------------------------------------------------------
def epochs_volatility_data(dates: List[str], time_step: str, window: str) -> None:
"""Uses local normalization to compute the volatility of the time series.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01-01', '2020-12-31']).
:param time_step: time step of the data (i.e. '1m', '1h', '1d', '1wk',
'1mo').
:param window: window time to compute the volatility (i.e. '25').
:return: None -- The function saves the data in a file and does not return
a value.
"""
function_name: str = epochs_volatility_data.__name__
epochs_tools.function_header_print_data(function_name, dates, time_step, window)
try:
# Load data
data: pd.DataFrame = pickle.load(
open(
f"../data/epochs/returns_data_{dates[0]}_{dates[1]}_step"
+ f"_{time_step}_win_.pickle",
"rb",
)
)
std_df: pd.DataFrame = data.rolling(window=int(window)).std().dropna()
# Saving data
epochs_tools.save_data(std_df, function_name, dates, time_step, window)
except FileNotFoundError as error:
print("No data")
print(error)
print()
# -----------------------------------------------------------------------------
def epochs_normalized_returns_data(
dates: List[str], time_step: str, window: str
) -> None:
"""Uses rolling normalization to normalize the returns of the time series.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01-01', '2020-12-31']).
:param time_step: time step of the data (i.e. '1m', '1h', '1d', 'wk',
'1mo').
:param window: window time to compute the volatility (i.e. '60').
:return: None -- The function saves the data in a file and does not return
a value.
"""
function_name: str = epochs_normalized_returns_data.__name__
epochs_tools.function_header_print_data(function_name, dates, time_step, window)
try:
# Load data
data: pd.DataFrame = pickle.load(
open(
f"../data/epochs/returns_data_{dates[0]}_{dates[1]}_step"
+ f"_{time_step}_win_.pickle",
"rb",
)
)
data_win = data.iloc[int(window) - 1 :]
data_mean = data.rolling(window=int(window)).mean().dropna()
data_std = data.rolling(window=int(window)).std().dropna()
normalized_df: pd.DataFrame = (data_win - data_mean) / data_std
# Saving data
epochs_tools.save_data(normalized_df, function_name, dates, time_step, window)
except FileNotFoundError as error:
print("No data")
print(error)
print()
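# Sketch of the epoch normalization above: with window w, each return r_t is mapped to
# (r_t - mean(r_{t-w+1..t})) / std(r_{t-w+1..t}), i.e. it is normalized by the statistics of
# its own trailing window rather than by the full-sample mean and standard deviation.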
# -----------------------------------------------------------------------------
def epochs_correlation_matrix_data(
dates: List[str], time_step: str, window: str
) -> None:
"""uses local normalization to compute the correlation matrix of the
normalized returns.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01-01', '2020-12-31']).
:param time_step: time step of the data (i.e. '1m', '1h', '1d', '1wk',
'1mo').
:param window: window time to compute the volatility (i.e. '25').
:return: None -- The function saves the data in a file and does not return
a value.
"""
function_name: str = epochs_correlation_matrix_data.__name__
epochs_tools.function_header_print_data(function_name, dates, time_step, window)
try:
# Load data
data: pd.DataFrame = pickle.load(
open(
f"../data/epochs/epochs_normalized_returns_data_{dates[0]}"
+ f"_{dates[1]}_step_{time_step}_win_{window}.pickle",
"rb",
)
)
corr_matrix_df: pd.DataFrame = data.corr()
# Saving data
epochs_tools.save_data(corr_matrix_df, function_name, dates, time_step, window)
except FileNotFoundError as error:
print("No data")
print(error)
print()
except TypeError as error:
print("At least two stocks are needed to compute the correlation")
print(error)
print()
# ----------------------------------------------------------------------------
def epochs_aggregated_dist_returns_pair_data(
dates: List[str], time_step: str, cols: List[str], window: str, norm: str = "long"
) -> List[float]:
"""Uses local normalization to compute the aggregated distribution of
returns for a pair of stocks.
:param dates: List of the interval of dates to be analyzed
(i.e. ['1980-01-01', '2020-12-31']).
:param time_step: time step of the data (i.e. '1m', '1h', '1d', '1wk',
'1mo').
:param cols: pair of stocks to be analyzed (i.e. ['AAPL', 'MSFT']).
:param window: window time to compute the volatility (i.e. '25').
:param norm: define if the normalization is made over the complete time series or
    within each epoch. Default 'long'; 'short' is the other option.
:return: List[float] -- The function returns a list with float numbers.
"""
try:
# Load data
two_col: pd.DataFrame = pickle.load(
open(
f"../data/epochs/returns_data_{dates[0]}_{dates[1]}_step"
+ f"_{time_step}_win__K_.pickle",
"rb",
)
)[[cols[0], cols[1]]]
if norm == "long":
two_col = (two_col - two_col.mean()) / two_col.std()
# List to extend with the returns values of each pair
agg_ret_mkt_list: List[float] = []
# Add the index as a column to group the return values
two_col["DateCol"] = two_col.index
# Add a column grouping the returns in the time window
if time_step == "1m":
two_col["DateCol"] = pd.to_datetime(two_col["DateCol"])
two_col["Group"] = two_col.groupby(
| pd.Grouper(key="DateCol", freq=window + "T") | pandas.Grouper |
# Vermont Police Tools - Tools for cleaning Vermont police data
#
# Written in 2020 by BTV CopWatch <<EMAIL>>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""Tools for cleaning Chittenden County Sheriff's Office data."""
import pandas as pd
from .. import utils
def clean_roster(csv):
"""Clean a digitized roster."""
@utils.nullable
def clean_job_title(job_title):
job_title_choices = {
"Sheriff": "Sheriff",
"Captain": "Captain",
"Lieutenant": "Lieutenant",
"Sergeant": "Sergeant",
"Corporal": "Corporal",
"Deputy": "Deputy",
"Deuty": "Deputy", # Fix a typo.
}
return job_title_choices[job_title]
@utils.nullable
def clean_star_no(star_no):
assert star_no[:2] == "CC"
return int(star_no[2:])
@utils.nullable
def clean_employment_date(employment_date):
return pd.to_datetime(employment_date.replace(".", "/"))
@utils.nullable
def clean_race(race):
race_choices = {
"B": "BLACK",
"W": "WHITE",
}
return race_choices[race.capitalize()]
@utils.nullable
def clean_gender(gender):
return gender.capitalize()
dirty = pd.read_csv(csv)
# Get the year from the name of the rightmost column.
salary_label = dirty.columns[-2]
overtime_pay_label = dirty.columns[-1]
salary_year = int(salary_label.split()[-1])
cleaned = | pd.DataFrame() | pandas.DataFrame |
import oemof.solph
import pandas as pd
from pommesdispatch.model_funcs import model_control
def return_model_and_parameters():
"""Create and parameterize a dispatch model using default values"""
dm = model_control.DispatchModel()
control_parameters = {
"rolling_horizon": False,
"aggregate_input": False,
"countries": ['AT', 'BE', 'CH', 'CZ', 'DE', 'DK1', 'DK2', 'FR',
'NL', 'NO1', 'NO2', 'NO3', 'NO4', 'NO5', 'PL',
'SE1', 'SE2', 'SE3', 'SE4'],
"solver": "gurobi",
"fuel_cost_pathway": "middle",
"activate_emissions_limit": False,
"emissions_pathway": "100_percent_linear",
"activate_demand_response": False,
"demand_response_approach": "DLR",
"demand_response_scenario": "50",
"save_production_results": True,
"save_price_results": True,
"write_lp_file": False}
time_parameters = {
"start_time": "2017-01-01 00:00:00",
"end_time": "2017-01-01 04:00:00",
"freq": "60min"}
input_output_parameters = {
"path_folder_input": "tests/csv_files/",
"path_folder_output": "tests/csv_files/"}
all_parameters = {**control_parameters, **time_parameters,
**input_output_parameters}
dm.update_model_configuration(control_parameters, time_parameters,
input_output_parameters, nolog=True)
return dm, all_parameters
def set_up_rolling_horizon_run():
"""Set up a model for a rolling horizon run"""
model_meta = {
"overall_objective": 0,
"overall_time": 0,
"overall_solution_time": 0
}
dm, all_parameters = return_model_and_parameters()
dm.update_model_configuration({"rolling_horizon": True}, nolog=True)
rolling_horizon_parameters = {
"time_slice_length_wo_overlap_in_hours": 2,
"overlap_in_hours": 1}
dm.add_rolling_horizon_configuration(
rolling_horizon_parameters, nolog=True)
iteration_results = {
"storages_initial": pd.DataFrame(),
"model_results": {},
"dispatch_results": pd.DataFrame(),
"power_prices": | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/22 19:56
contact: <EMAIL>
desc: Investing.com (英为财情) - FX - historical data for currency pairs
https://cn.investing.com/currencies/
https://cn.investing.com/currencies/eur-usd-historical-data
"""
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import short_headers, long_headers
def currency_name_url():
url = "https://cn.investing.com/currencies/"
res = requests.post(url, headers=short_headers)
    data_table = pd.read_html(res.text)[0].iloc[:, 1:]  # real-time currency quotes
    data_table.columns = ['中文名称', '英文名称', '最新', '最高', '最低', '涨跌额', '涨跌幅', '时间']
    name_code_dict = dict(zip(data_table["中文名称"].tolist(), [item.lower().replace("/", "-") for item in data_table["英文名称"].tolist()]))
return name_code_dict
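# Hypothetical usage (the exact keys depend on what cn.investing.com lists at call time):
#     name_code_dict = currency_name_url()
#     name_code_dict.get("欧元/美元")  # -> "eur-usd"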
def currency_hist(index_name="欧元/美元", start_date="2005/01/01", end_date="2020/01/17"):
    """
    Fetch historical data for a currency pair; note the length of the requested date range
    :param index_name: Chinese pair name as listed on cn.investing.com, e.g.
        {'欧元/美元': 'eur-usd', '英镑/美元': 'gbp-usd', '美元/日元': 'usd-jpy',
         '澳大利亚元/美元': 'aud-usd', '美元/加拿大元': 'usd-cad', '澳大利亚元/港币': 'aud-hkd',
         '新西兰元/美元': 'nzd-usd', '美元/人民币': 'usd-cny', '澳大利亚元/人民币': 'aud-cny',
         '俄罗斯卢布/人民币': 'rub-cny'}
    :type index_name: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: historical data of the currency pair
    :rtype: pandas.DataFrame
    """
name_code_dict = currency_name_url()
temp_url = f"https://cn.investing.com/currencies/{name_code_dict[index_name]}-historical-data"
res = requests.post(temp_url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
res = requests.post(temp_url, headers=short_headers)
soup = BeautifulSoup(res.text, "lxml")
data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[0].strip()
para_data = re.findall(r"\d+", data)
payload = {
"curr_id": para_data[0],
"smlID": para_data[1],
"header": title,
"st_date": start_date,
"end_date": end_date,
"interval_sec": "Daily",
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
}
url = "https://cn.investing.com/instruments/HistoricalDataAjax"
res = requests.post(url, data=payload, headers=long_headers)
soup = BeautifulSoup(res.text, "lxml")
vest_list = [item.get_text().strip().split("\n") for item in soup.find_all("tr")]
raw_df = | pd.DataFrame(vest_list) | pandas.DataFrame |
"""
Access the most common functions of Facebook api.
Download posts by ids, download all posts by given profile, profile infos,
all with rate limiting.
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import time
import requests
import pandas as pd
import facebook
import logging
from functools import wraps
import pytz
# TODO what error message is printed for calls for posts older than profile?
POST_FIELDS = [ # list of fields called when acquiring posts by Facebook SDK
"id",
"application",
"caption",
"created_time",
"description",
"from",
"link",
"message",
"message_tags",
"name",
"object_id",
"parent_id",
"permalink_url",
"picture",
"place",
"properties",
"status_type",
"story",
"type",
"updated_time",
"comments.filter(stream).limit(0).summary(true)",
"likes.limit(0).summary(true)",
"shares",
"reactions.summary(true)",
]
COMMENT_FIELDS = [
"id",
"comment_count",
"created_time",
"from",
"like_count",
"message",
"message_tags",
"object",
"parent",
]
POST_INSIGHT_FIELD = "insights.metric({})".format( # extra insights field
",".join([
"post_activity_by_action_type_unique",
"post_impressions_unique",
"post_impressions_paid_unique",
"post_impressions_fan_unique",
"post_impressions_fan_paid_unique",
"post_impressions_organic_unique",
"post_impressions_viral_unique",
"post_impressions_nonviral_unique",
"post_impressions_by_story_type_unique",
"post_engaged_users",
"post_negative_feedback_by_type_unique",
"post_engaged_fan",
"post_clicks_by_type_unique",
"post_reactions_by_type_total",
])) # TEST tested last in 2019, if needed, test!
class SocialMediaApi:
"""Handles common features of Facebook and Twitter api.
Methods
-------
profiles_info(ids, path=None) -> pd.DataFrame
Returns profiles info acquired by api calls.
posts(ids, insights=False, path=None) -> pd.DataFrame
Returns posts acquired by api calls.
profiles_posts(ids, since, until, n=100000, insights=False, path=None)
Returns posts from profiles within time range acquired by api calls.
"""
def save_df(self, df: pd.DataFrame, path: str):
"""Return DataFrame of elements and saves it to csv or excel path.
Parameters
----------
df : pd.DataFrame
DataFrame to save
path : str
path to table output file. Based on extension, table is saved to:
None: nowhere
.xlsx: Excel spreadsheet
else: csv table
"""
if path is None:
pass
elif path.endswith(".xlsx"):
with pd.ExcelWriter( # excel does not support timezones
path=path,
engine='xlsxwriter',
options={'remove_timezone': True}) as writer:
df.to_excel(writer)
else:
df.to_csv(path, encoding="utf-8")
def add_info(self, df: pd.DataFrame):
"""Return `df` with profiles info columns.
All new columns are added to the end of `df` with 'profile_' prefix.
Parameters
----------
df : pd.DataFrame
Posts dataframe. Must consist of self.post_from_col column.
"""
if len(df) == 0:
logging.warning(
("no posts downloaded, thus no profile info is downloaded"))
return df
else:
ids_downloaded = list(df[self.post_from_col].unique())
info_df = self.profiles_info(ids_downloaded)
info_df = info_df.add_prefix("profile_")
info_df.profile_api_call_id = info_df.profile_api_call_id.astype(
"str")
df[self.post_from_col] = df[self.post_from_col].astype("str")
df = df.merge(
info_df, how="left",
left_on=self.post_from_col,
right_on="profile_api_call_id")
return df
def profiles_info(self, ids: list, path: str=None) -> pd.DataFrame:
"""Return profiles info acquired by api calls.
Parameters
----------
ids : list of strs or ints
ids or names of profiles
path : str or None (default None)
if passed, dataframe is saved there as csv
Output index 'api_call_id' notes profile id/name on which api was called.
"""
elements = []
for i in ids:
element = self.get_profile_info(i)
element["api_call_id"] = i
elements.append(element)
df = pd.DataFrame(elements)
self.save_df(df, path)
return df
def posts(
self,
ids: list,
insights: bool=False,
comments: bool=False,
info: bool=False,
path: str=None) -> pd.DataFrame:
"""Return posts acquired by api calls.
Parameters
----------
ids : list of strs or ints
posts ids
insights : bool (default False)
whether insight fields should be called (requires page access token
with admin rights)
comments : bool (default False)
whether comments_reactions should be included (demands downloading
all comments)
info : bool (default False)
whether profiles info should be included (demands downloading all
profiles info); usage includes accessing Facebook profile fans
path : str or None (default None)
if passed, dataframe is saved there as csv
"""
posts = []
for i in ids:
element = self.get_post(i, insights=insights)
if element is not None:
posts.extend(self.transform_posts([element], i))
try:
df = pd.DataFrame(posts)
except AttributeError:
print(posts)
return posts
if info:
df = self.add_info(df)
if comments:
df = self.add_comments(df)
self.save_df(df, path)
return df
def profiles_posts(
self,
ids: list,
since: datetime.datetime,
until: datetime.datetime=datetime.datetime(2100, 1, 1),
n: int=100000,
insights: bool=False,
comments: bool=False,
info: bool=False,
path: str=None
) -> pd.DataFrame:
"""Return posts from profiles within time range acquired by api calls.
Parameters
----------
ids : list of strs or ints
profiles ids or names
since : datetime.datetime
start of time range
until : datetime.datetime
end of time range
n : int (default 100,000)
maximal number of downloaded posts
insights : bool (default False)
whether insight fields should be called (requires page access token
with admin rights)
comments : bool (default False)
whether comments_reactions should be included (demands downloading
all comments)
info : bool (default False)
whether profiles info should be included (demands downloading all
profiles info); usage includes accessing Facebook profile fans
path : str (default None)
if passed, dataframe is saved there as csv
"""
def add_timezone(x):
if x.tzinfo is None:
return pytz.utc.localize(x)
else:
return x
def paginate_elements(first_connection):
connection = first_connection
while (
(len(elements) < n) and
(pd.to_datetime(
self.connection_date(elements[-1])) > since_tz)):
try:
connection = self.get_next_connection(connection)
except KeyError:
logging.info(
("connection has no next page: downloading profile "
"posts with parameters {}")
.format([i, since, until, n, insights]))
return elements
if self.returns_data(connection):
elements.extend(self.connection_data(connection))
else:
logging.error(
("no data in connection {} while downloading profile "
"posts with parameters {}")
.format(connection, [i, since, until, n, insights]))
return elements
return elements
# timezone is needed for dates comparison
since_tz = add_timezone(since)
until_tz = add_timezone(until)
posts = []
for i in ids:
elements = []
# First, self.get_profile_posts_initial_page is called.
connection = self.get_profile_posts_initial_call(
i, since_tz, until_tz, n, insights)
if self.returns_data(connection):
elements.extend(self.connection_data(connection))
# Afterwards, next page of results is listed until number of
# posts or time range is exceeded.
elements = paginate_elements(connection)
posts.extend(self.transform_posts(elements, i))
df = pd.DataFrame(posts)
# erase posts outside of time range (Twitter does not allow to
# download specific time range, but posts have to be downloaded from
# present backwards)
if len(df) > 0:
try:
df = df.loc[
((df.created_time >= since) & (df.created_time <= until))]
except AttributeError:
logging.warning(
("time range control not executed: downloaded table has "
"no 'created_time' column; present columns: {}")
.format(df.columns))
if info:
df = self.add_info(df)
if comments:
df = self.add_comments(df)
self.save_df(df, path)
return df
class FbApi(SocialMediaApi):
"""Handle downloading from Facebook Graph api.
Attributes
----------
token: Facebook access token
(app access token or page access token, based on usage)
Methods
-------
profiles_info(ids, path=None) -> pd.DataFrame
Returns profiles info acquired by api calls.
posts(ids, insights=False, path=None) -> pd.DataFrame
Returns posts acquired by api calls.
profiles_posts(ids, since, until, n=100000, insights=False, path=None)
Returns posts from profiles within time range acquired by api calls.
posts_comments(ids, n=100000, path=False) -> pd.DataFrame
Returns comments under posts with given ids acquired by api calls.
Examples
--------
>>> f = FbApi("fb_access_token")
>>> f.profiles_info(profiles_ids)
>>> f.posts(posts_ids)
>>> f.profiles_posts(profiles_ids, since, until)
>>> f.posts_comments(posts_ids)
Notes
-----
Wraps up Facebook Python SDK library:
https://github.com/mobolic/facebook-sdk
"""
def __init__(self, token="xxx"):
self.api = facebook.GraphAPI(token, version="3.1")
def rate_limit_sdk(func): # TODO max_tries and wait as parameters
"""Return `func` multiple times in case of SDK limit error.
After each rate limited call, waits for 15 minutes and tries again
or gives up after 2 hour limit is reached.
Twin method for self.rate_limit_requests (only difference in error
code location).
"""
@wraps(func)
def wrapper(*args, **kwargs):
max_wait = 7200
wait = 900
max_tries = max_wait // wait
tries = 0
while tries <= max_tries:
try:
return func(*args, **kwargs)
except facebook.GraphAPIError as e:
if e.code == 4:
logging.warning(
"request limit reached, waiting for 15 minutes")
time.sleep(wait)
else:
logging.warning(
("Facebook sdk returned error message while"
"calling {} with args: {}, kwargs: {}, error: {}")
.format(
func.__name__,
args,
kwargs,
e))
return []
tries += 1
logging.error(
("request limit not solved, downloading stopped "
"while calling {} with args: {}, kwargs: {}")
.format(func.__name__, args, kwargs))
return []
return wrapper
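# Hypothetical usage sketch (the wrapped method body is an assumption, not from this module):
# any Graph API call can be decorated so that rate-limit errors trigger the 15-minute back-off,
# e.g.
#     @rate_limit_sdk
#     def get_profile_info(self, profile_id):
#         return self.api.get_object(profile_id)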
def rate_limit_requests(func): # TODO max_tries and wait as parameters
"""Return `func` multiple times in case of request limit error.
After each rate limited call, waits for 15 minutes and tries again
or gives up after 2 hour limit is reached.
Twin method for self.rate_limit_sdk (only difference in error
code location).
"""
@wraps(func)
def wrapper(*args, **kwargs):
max_wait = 7200
wait = 900
max_tries = max_wait // wait
tries = 0
while tries <= max_tries:
connection = func(*args, **kwargs)
if "error" in connection:
if connection["error"]["code"] == 4:
logging.warning(
"request limit reached, waiting for 15 minutes")
time.sleep(wait)
else:
logging.warning(
("Facebook sdk returned error message while"
"calling {} with args: {}, kwargs: {}, \n\n"
"error: {}")
.format(
func.__name__,
args,
kwargs,
connection["error"]))
return []
else:
return connection
tries += 1
logging.error(
("request limit not solved, downloading stopped "
"while calling {} with args: {}, kwargs: {}")
.format(func.__name__, args, kwargs))
return []
return wrapper
def returns_data(self, connection) -> bool:
try:
return connection["data"] != []
except (KeyError, TypeError):
return False
def connection_data(self, connection):
return connection["data"]
def connection_date(self, connection):
return connection["created_time"]
def transform_posts(self, posts: list, i) -> list:
"""Return list of posts in dictionaries with additional collumns.
Parameters
----------
posts : list of dicts
posts to transform
i : str or int
api call id noting how the row was acquired (ie. from profile with
given `i` or by calling post id `i` directly)
New columns:
comments_count
likes_count
reactions_count
shares_count
interactions ( = comments_count + reactions_count + shares_count)
post_link
from_id
from_name
api_call_id ( = i)
Updated columns:
created_time is pd.to_datetime'd
"""
def post_with_additional_collumns(post):
if post == []:
return []
else:
try:
post["comments_count"] = post["comments"][
"summary"]["total_count"]
except KeyError:
post["comments_count"] = None
try:
post["from_id"] = post["from"]["id"]
except KeyError:
post["from_id"] = None
try:
post["from_name"] = post["from"]["name"]
except KeyError:
post["from_name"] = None
post["created_time"] = | pd.to_datetime(post["created_time"]) | pandas.to_datetime |
__author__ = '<NAME>'
import pandas as pd
import numpy as np
class Preprocess_Dataframe(object):
"""
Check properties of the dataset and warn about irregularities in the data.
Arguments:
    data: population passed in as a file or a dataframe
    config: user-supplied config or default
Returns:
    population: the exact dataset for further processing -> pandas.DataFrame
"""
def __init__(self, data, config):
self.data = data
self.config = config
self.population = None
def get_file(self):
if type(self.data) is not str:
self.population = | pd.DataFrame(data=self.data) | pandas.DataFrame |
# encoding: utf-8
from __future__ import print_function
import os
from collections import OrderedDict
import numpy as np
import pandas as pd
from . import performance as pfm
from . import plotting
import jaqs.util as jutil
from jaqs.trade import common
class SignalDigger(object):
"""
Attributes
----------
signal_data : pd.DataFrame - MultiIndex
Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile']
period : int
Horizon used to calculate return.
n_quantiles : int
output_format : str
output_folder : str
"""
def __init__(self, output_folder=".", output_format='pdf'):
self.output_format = output_format
self.output_folder = os.path.abspath(output_folder)
self.signal_data = None
self.period = None
self.n_quantiles = 5
self.benchmark_ret = None
self.returns_report_data = dict()
self.ic_report_data = dict()
self.fig_data = dict()
self.fig_objs = dict()
def process_signal_before_analysis(self,
signal, price=None, ret=None, benchmark_price=None,
period=5, n_quantiles=5,
mask=None,
forward=False):
"""
Prepare for signal analysis.
Parameters
----------
signal : pd.DataFrame
Index is date, columns are stocks.
price : pd.DataFrame
Index is date, columns are stocks.
ret : pd.DataFrame
Index is date, columns are stocks.
benchmark_price : pd.DataFrame or pd.Series or None
Price of benchmark.
mask : pd.DataFrame
Data cells that should NOT be used.
n_quantiles : int
period : int
periods to compute forward returns on.
Returns
-------
res : pd.DataFrame
Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile']
"""
"""
Deal with suspensions:
If the period of calculating return is d (from T to T+d), then
we do not use signal values of those suspended on T,
we do not calculate return for those suspended on T+d.
"""
# ----------------------------------------------------------------------
# parameter validation
if price is None and ret is None:
raise ValueError("One of price / ret must be provided.")
if price is not None and ret is not None:
raise ValueError("Only one of price / ret should be provided.")
if ret is not None and benchmark_price is not None:
raise ValueError("You choose 'return' mode but benchmark_price is given.")
if not (n_quantiles > 0 and isinstance(n_quantiles, int)):
raise ValueError("n_quantiles must be a positive integer. Input is: {}".format(n_quantiles))
# ensure inputs are aligned
data = price if price is not None else ret
assert np.all(signal.index == data.index)
assert np.all(signal.columns == data.columns)
if mask is not None:
assert np.all(signal.index == mask.index)
assert np.all(signal.columns == mask.columns)
mask = jutil.fillinf(mask)
mask = mask.astype(int).fillna(0).astype(bool) # dtype of mask could be float. So we need to convert.
else:
mask = pd.DataFrame(index=signal.index, columns=signal.columns, data=False)
signal = jutil.fillinf(signal)
data = jutil.fillinf(data)
# ----------------------------------------------------------------------
# save data
self.n_quantiles = n_quantiles
self.period = period
# ----------------------------------------------------------------------
# Get dependent variables
if price is not None:
df_ret = pfm.price2ret(price, period=self.period, axis=0)
if benchmark_price is not None:
benchmark_price = benchmark_price.loc[signal.index]
bench_ret = pfm.price2ret(benchmark_price, self.period, axis=0)
self.benchmark_ret = bench_ret
residual_ret = df_ret.sub(bench_ret.values.flatten(), axis=0)
else:
residual_ret = df_ret
else:
residual_ret = ret
# Get independent varibale
signal = signal.shift(1) # avoid forward-looking bias
# forward or not
if forward:
# point-in-time signal and forward return
residual_ret = residual_ret.shift(-self.period)
else:
# past signal and point-in-time return
signal = signal.shift(self.period)
# ----------------------------------------------------------------------
# get masks
# mask_prices = data.isnull()
# Because we use FORWARD return, if one day's price is broken, the day that is <period> days ago is also broken.
# mask_prices = np.logical_or(mask_prices, mask_prices.shift(self.period))
mask_price_return = residual_ret.isnull()
mask_signal = signal.isnull()
mask_tmp = np.logical_or(mask_signal, mask_price_return)
mask_all = np.logical_or(mask, mask_tmp)
# if price is not None:
# mask_forward = np.logical_or(mask, mask.shift(self.period).fillna(True))
# mask = np.logical_or(mask, mask_forward)
# ----------------------------------------------------------------------
# calculate quantile
signal_masked = signal.copy()
signal_masked = signal_masked[~mask_all]
if n_quantiles == 1:
df_quantile = signal_masked.copy()
df_quantile.loc[:, :] = 1.0
else:
df_quantile = jutil.to_quantile(signal_masked, n_quantiles=n_quantiles)
# ----------------------------------------------------------------------
# stack
def stack_td_symbol(df):
df = pd.DataFrame(df.stack(dropna=False)) # do not dropna
df.index.names = ['trade_date', 'symbol']
df.sort_index(axis=0, level=['trade_date', 'symbol'], inplace=True)
return df
mask_all = stack_td_symbol(mask_all)
df_quantile = stack_td_symbol(df_quantile)
residual_ret = stack_td_symbol(residual_ret)
# ----------------------------------------------------------------------
# concat signal value
res = stack_td_symbol(signal)
res.columns = ['signal']
res['return'] = residual_ret
res['quantile'] = df_quantile
res = res.loc[~(mask_all.iloc[:, 0]), :]
print("Nan Data Count (should be zero) : {:d}; " \
"Percentage of effective data: {:.0f}%".format(res.isnull().sum(axis=0).sum(),
len(res) * 100. / signal.size))
res = res.astype({'signal': float, 'return': float, 'quantile': int})
self.signal_data = res
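# Hypothetical usage sketch (signal_df / price_df are placeholders for the user's own wide
# DataFrames indexed by trade_date with symbols as columns):
#     digger = SignalDigger(output_folder='.', output_format='plot')
#     digger.process_signal_before_analysis(signal_df, price=price_df, period=5, n_quantiles=5)
#     digger.create_returns_report()
#     digger.create_information_report()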
def show_fig(self, fig, file_name):
"""
Save fig object to self.output_folder/filename.
Parameters
----------
fig : matplotlib.figure.Figure
file_name : str
"""
self.fig_objs[file_name] = fig
if self.output_format in ['pdf', 'png', 'jpg']:
fp = os.path.join(self.output_folder, '.'.join([file_name, self.output_format]))
jutil.create_dir(fp)
fig.savefig(fp)
print("Figure saved: {}".format(fp))
elif self.output_format == 'base64':
fig_b64 = jutil.fig2base64(fig, 'png')
self.fig_data[file_name] = fig_b64
print("Base64 data of figure {} will be stored in dictionary.".format(file_name))
elif self.output_format == 'plot':
fig.show()
else:
raise NotImplementedError("output_format = {}".format(self.output_format))
@plotting.customize
def create_returns_report(self):
"""
Creates a tear sheet for returns analysis of a signal.
"""
n_quantiles = self.signal_data['quantile'].max()
# ----------------------------------------------------------------------------------
# Daily Signal Return Time Series
# Use regression or weighted average to calculate.
period_wise_long_ret =\
pfm.calc_period_wise_weighted_signal_return(self.signal_data, weight_method='long_only')
period_wise_short_ret = \
pfm.calc_period_wise_weighted_signal_return(self.signal_data, weight_method='short_only')
cum_long_ret = pfm.period_wise_ret_to_cum(period_wise_long_ret, period=self.period, compound=False)
cum_short_ret = pfm.period_wise_ret_to_cum(period_wise_short_ret, period=self.period, compound=False)
# period_wise_ret_by_regression = perf.regress_period_wise_signal_return(signal_data)
# period_wise_ls_signal_ret = \
# pfm.calc_period_wise_weighted_signal_return(signal_data, weight_method='long_short')
# daily_ls_signal_ret = pfm.period2daily(period_wise_ls_signal_ret, period=period)
# ls_signal_ret_cum = pfm.daily_ret_to_cum(daily_ls_signal_ret)
# ----------------------------------------------------------------------------------
# Period-wise Quantile Return Time Series
# We calculate quantile return using equal weight or market value weight.
# Quantile is already obtained according to signal values.
# quantile return
period_wise_quantile_ret_stats = pfm.calc_quantile_return_mean_std(self.signal_data, time_series=True)
cum_quantile_ret = pd.concat({k: pfm.period_wise_ret_to_cum(v['mean'], period=self.period, compound=False)
for k, v in period_wise_quantile_ret_stats.items()},
axis=1)
# top quantile minus bottom quantile return
period_wise_tmb_ret = pfm.calc_return_diff_mean_std(period_wise_quantile_ret_stats[n_quantiles],
period_wise_quantile_ret_stats[1])
cum_tmb_ret = pfm.period_wise_ret_to_cum(period_wise_tmb_ret['mean_diff'], period=self.period, compound=False)
# ----------------------------------------------------------------------------------
# Alpha and Beta
# Calculate using regression.
'''
weighted_portfolio_alpha_beta
tmb_alpha_beta =
'''
# start plotting
if self.output_format:
vertical_sections = 6
gf = plotting.GridFigure(rows=vertical_sections, cols=1)
gf.fig.suptitle("Returns Tear Sheet\n\n(no compound)\n (period length = {:d} days)".format(self.period))
plotting.plot_quantile_returns_ts(period_wise_quantile_ret_stats,
ax=gf.next_row())
plotting.plot_cumulative_returns_by_quantile(cum_quantile_ret,
ax=gf.next_row())
plotting.plot_cumulative_return(cum_long_ret,
title="Signal Weighted Long Only Portfolio Cumulative Return",
ax=gf.next_row())
plotting.plot_cumulative_return(cum_short_ret,
title="Signal Weighted Short Only Portfolio Cumulative Return",
ax=gf.next_row())
plotting.plot_mean_quantile_returns_spread_time_series(period_wise_tmb_ret, self.period,
bandwidth=0.5,
ax=gf.next_row())
plotting.plot_cumulative_return(cum_tmb_ret,
title="Top Minus Bottom (long top, short bottom)"
"Portfolio Cumulative Return",
ax=gf.next_row())
self.show_fig(gf.fig, 'returns_report')
self.returns_report_data = {'period_wise_quantile_ret': period_wise_quantile_ret_stats,
'cum_quantile_ret': cum_quantile_ret,
'cum_long_ret': cum_long_ret,
'cum_short_ret': cum_short_ret,
'period_wise_tmb_ret': period_wise_tmb_ret,
'cum_tmb_ret': cum_tmb_ret}
@plotting.customize
def create_information_report(self):
"""
Creates a tear sheet for information analysis of a signal.
"""
ic = pfm.calc_signal_ic(self.signal_data)
ic.index = pd.to_datetime(ic.index, format="%Y%m%d")
monthly_ic = pfm.mean_information_coefficient(ic, "M")
if self.output_format:
ic_summary_table = pfm.calc_ic_stats_table(ic)
plotting.plot_information_table(ic_summary_table)
columns_wide = 2
fr_cols = len(ic.columns)
rows_when_wide = (((fr_cols - 1) // columns_wide) + 1)
vertical_sections = fr_cols + 3 * rows_when_wide + 2 * fr_cols
gf = plotting.GridFigure(rows=vertical_sections, cols=columns_wide)
gf.fig.suptitle("Information Coefficient Report\n\n(period length = {:d} days)"
"\ndaily IC = rank_corr(period-wise forward return, signal value)".format(self.period))
plotting.plot_ic_ts(ic, self.period, ax=gf.next_row())
plotting.plot_ic_hist(ic, self.period, ax=gf.next_row())
# plotting.plot_ic_qq(ic, ax=ax_ic_hqq[1::2])
plotting.plot_monthly_ic_heatmap(monthly_ic, period=self.period, ax=gf.next_row())
self.show_fig(gf.fig, 'information_report')
self.ic_report_data = {'daily_ic': ic,
'monthly_ic': monthly_ic}
def create_binary_event_report(self, signal, price, mask, benchmark_price, periods,
join_method_periods='inner', group_by=None):
"""
Parameters
----------
signal : pd.DataFrame
price : pd.DataFrame
mask : pd.DataFrame
benchmark_price : pd.DataFrame
periods : list of int
join_method_periods : {'inner', 'outer'}.
Whether to take intersection or union of data of different periods.
group_by : {'year', 'month', None}
Calculate various statistics within each year/month/whole sample.
Returns
-------
res : dict
"""
import scipy.stats as scst
# Raw Data
dic_signal_data = OrderedDict()
for my_period in periods:
self.process_signal_before_analysis(signal, price=price, mask=mask,
n_quantiles=1, period=my_period,
benchmark_price=benchmark_price,
forward=True)
dic_signal_data[my_period] = self.signal_data
# Processed Data
dic_events = OrderedDict()
dic_all = OrderedDict()
for period, df in dic_signal_data.items():
ser_ret = df['return']
ser_sig = df['signal'].astype(bool)
events_ret = ser_ret.loc[ser_sig]
dic_events[period] = events_ret
dic_all[period] = ser_ret
df_events = pd.concat(dic_events, axis=1, join=join_method_periods)
df_all = pd.concat(dic_all, axis=1, join=join_method_periods)
# Data Statistics
def _calc_statistics(df):
df_res = pd.DataFrame(index=periods,
columns=['Annu. Ret.', 'Annu. Vol.',
#'Annual Return (all sample)', 'Annual Volatility (all sample)',
't-stat', 'p-value', 'skewness', 'kurtosis', 'occurrence'],
data=np.nan)
df_res.index.name = 'Period'
ser_periods = pd.Series(index=df.columns, data=df.columns.values)
ratio = (1.0 * common.CALENDAR_CONST.TRADE_DAYS_PER_YEAR / ser_periods)
mean = df.mean(axis=0)
std = df.std(axis=0)
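# Annualization (assumes i.i.d. period returns): the mean scales linearly with the number
# of holding periods per year, while volatility scales with the square root of that number.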
annual_ret, annual_vol = mean * ratio, std * np.sqrt(ratio)
t_stats, p_values = scst.ttest_1samp(df.values, np.zeros(df.shape[1]), axis=0)
df_res.loc[:, 't-stat'] = t_stats
df_res.loc[:, 'p-value'] = np.round(p_values, 5)
df_res.loc[:, "skewness"] = scst.skew(df, axis=0)
df_res.loc[:, "kurtosis"] = scst.kurtosis(df, axis=0)
df_res.loc[:, 'Annu. Ret.'] = annual_ret
df_res.loc[:, 'Annu. Vol.'] = annual_vol
df_res.loc[:, 'occurrence'] = len(df)
# dic_res[period] = df
return df_res
if group_by == 'year':
grouper_func = jutil.date_to_year
elif group_by == 'month':
grouper_func = jutil.date_to_month
else:
grouper_func = get_dummy_grouper
idx_group = grouper_func(df_events.index.get_level_values('trade_date'))
df_stats = df_events.groupby(idx_group).apply(_calc_statistics)
idx_group_all = grouper_func(df_all.index.get_level_values('trade_date'))
df_all_stats = df_all.groupby(idx_group_all).apply(_calc_statistics)
df_all_stats = df_all_stats.loc[df_stats.index, ['Annu. Ret.', 'Annu. Vol.']]
df_all_stats.columns = ['Annu. Ret. (all samp)', 'Annu. Vol. (all samp)']
df_stats = pd.concat([df_stats, df_all_stats], axis=1)
# return df_all, df_events, df_stats
ser_signal_raw, monthly_signal, yearly_signal = calc_calendar_distribution(signal)
# return
# plot
gf = plotting.GridFigure(rows=len(np.unique(idx_group)) * len(periods) + 3, cols=2, height_ratio=1.2)
gf.fig.suptitle("Event Return Analysis (annualized)")
plotting.plot_calendar_distribution(ser_signal_raw,
monthly_signal=monthly_signal, yearly_signal=yearly_signal,
ax1=gf.next_row(), ax2=gf.next_row())
plotting.plot_event_bar(df_stats.reset_index(), x='Period', y='Annu. Ret.', hue='trade_date', ax=gf.next_row())
# plotting.plot_event_pvalue(df_stats['p-value'], ax=gf.next_subrow())
def _plot_dist(df):
date = grouper_func(df.index.get_level_values('trade_date'))[0]
plotting.plot_event_dist(df, group_by.title() + ' ' + str(date), axs=[gf.next_cell() for _ in periods])
if group_by is not None:
df_events.groupby(idx_group).apply(_plot_dist)
else:
plotting.plot_event_dist(df_events, "", axs=[gf.next_cell() for _ in periods])
self.show_fig(gf.fig, 'event_report')
# dic_res['df_res'] = df_res
return df_all, df_events, df_stats
@plotting.customize
def create_full_report(self):
"""
Creates a full tear sheet for analysis and evaluating single
return predicting (alpha) signal.
"""
# signal quantile description statistics
qstb = calc_quantile_stats_table(self.signal_data)
if self.output_format:
plotting.plot_quantile_statistics_table(qstb)
self.create_returns_report()
self.create_information_report()
# we do not do turnover analysis for now
# self.create_turnover_report(signal_data)
res = dict()
res.update(self.returns_report_data)
res.update(self.ic_report_data)
res.update(self.fig_data)
return res
def create_single_signal_report(self, signal, price, periods, n_quantiles, mask=None, trade_condition=None):
"""
Parameters
----------
signal : pd.Series
index is integer date, values are signals
price : pd.Series
index is integer date, values are prices
mask : pd.Series or None, optional
index is integer date, values are bool
periods : list of int
trade_condition : dict , optional
{'cond_name1': {'column': str, 'hold': int, 'filter': func, 'direction': 1},
'cond_name2': {'column': str, 'hold': int, 'filter': func, 'direction': -1},
}
Returns
-------
res : dict
"""
if isinstance(signal, pd.DataFrame):
signal = signal.iloc[:, 0]
if isinstance(price, pd.DataFrame):
price = price.iloc[:, 0]
# calc return
ret_l = {period: pfm.price2ret(price, period=period, axis=0) for period in periods}
df_ret = pd.concat(ret_l, axis=1)
# ----------------------------------------------------------------------
# calculate quantile
if n_quantiles == 1:
df_quantile = signal.copy()
df_quantile.loc[:] = 1.0
else:
df_quantile = jutil.to_quantile(signal, n_quantiles=n_quantiles, axis=0)
# ----------------------------------------------------------------------
# concat signal value
res = pd.DataFrame(signal.shift(1))
res.columns = ['signal']
res['quantile'] = df_quantile
res = pd.concat([res, df_ret], axis=1)
res = res.dropna()
print("Nan Data Count (should be zero) : {:d}; " \
"Percentage of effective data: {:.0f}%".format(res.isnull().sum(axis=0).sum(),
len(res) * 100. / signal.size))
# calc quantile stats
gp = res.groupby(by='quantile')
dic_raw = {k: v for k, v in gp}
dic_stats = OrderedDict()
for q, df in gp:
df_stat = pd.DataFrame(index=['mean', 'std'], columns=df_ret.columns, data=np.nan)
df_stat.loc['mean', :] = df.loc[:, df_ret.columns].mean(axis=0)
df_stat.loc['std', :] = df.loc[:, df_ret.columns].std(axis=0)
dic_stats[q] = df_stat
# calculate IC
ics = calc_various_ic(res, ret_cols=df_ret.columns)
# backtest
if trade_condition is not None:
def sim_backtest(df, dic_of_cond):
dic_cum_ret = dict()
for key, dic in dic_of_cond.items():
col_name = dic['column']
func = dic['filter']
n_hold = dic['hold']
direction = dic['direction']
mask = df[col_name].apply(func).astype(int)
dic_cum_ret[key] = (df[n_hold] * mask).cumsum() * direction
df_cumret = pd.concat(dic_cum_ret, axis=1)
return df_cumret
df_backtest = sim_backtest(res, trade_condition)
# plot
gf = plotting.GridFigure(rows=3, cols=1, height_ratio=1.2)
gf.fig.suptitle("Event Return Analysis (annualized)")
plotting.plot_ic_decay(ics, ax=gf.next_row())
plotting.plot_quantile_return_mean_std(dic_stats, ax=gf.next_row())
if trade_condition is not None:
plotting.plot_batch_backtest(df_backtest, ax=gf.next_row())
self.show_fig(gf.fig, 'single_inst.pdf')
def calc_ic(x, y, method='rank'):
"""
Calculate IC between x and y.
Parameters
----------
x : np.ndarray
y : np.ndarray
method : {'rank', 'normal'}
Returns
-------
corr : float
"""
import scipy.stats as scst
if method == 'rank':
corr = scst.spearmanr(x, y)[0]
elif method == 'normal':
corr = np.corrcoef(x, y)[0, 1]
else:
raise NotImplementedError("method = {}".format(method))
return corr
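# Toy example (illustrative): with x = np.array([1, 2, 3]) and y = np.array([0.1, 0.3, 0.2]),
# calc_ic(x, y, method='rank') gives the Spearman correlation 0.5, while method='normal'
# gives the Pearson correlation of the raw values.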
def calc_various_ic(df, ret_cols):
res_dic = dict()
# signal normal IC: signal value v.s. return
res_dic['normal'] = [calc_ic(df['signal'], df[col], method='normal') for col in ret_cols]
# signal rank IC: signal value v.s. return
res_dic['rank'] = [calc_ic(df['signal'], df[col], method='rank') for col in ret_cols]
# quantile normal IC: signal quantile v.s. return
res_dic['normal_q'] = [calc_ic(df['quantile'], df[col], method='normal') for col in ret_cols]
# quantile rank IC: signal quantile v.s. return
res_dic['rank_q'] = [calc_ic(df['quantile'], df[col], method='rank') for col in ret_cols]
res = pd.DataFrame(index=ret_cols, data=res_dic)
return res
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
import os
import graco
import numpy as np
import pandas as pd
import networkx as nx
"""
Takes network, feature and metric as input and calculates distance matrix.
"""
# =============================================================================
# -------------------------------- FUNCTIONS --------------------------------
# =============================================================================
def get_feature_matrix(feature, G_nx):
if feature == 'GDV':
GDV = graco.orbits(G_nx)
return GDV
else:
GCV = graco.coefficients(G_nx)
# Single equation sets
if feature == 'GCV-D':
feature_matrix = GCV['D']
elif feature == 'GCV-A':
feature_matrix = GCV['A']
elif feature == 'GCV-G':
feature_matrix = GCV['G']
elif feature == 'GCV-G-sym':
feature_matrix = GCV['G'][['0-0','1-1','3-3']]
elif feature == 'GCV-O':
feature_matrix = GCV['O']
elif feature == 'GCV-3':
feature_matrix = GCV[['A','D']].xs('0', axis=1, level='Equation')
# Combined equation sets
elif feature == 'GCV-DA':
feature_matrix = GCV[['D','A']]
elif feature == 'GCV-DG':
feature_matrix = GCV[['D','G']]
elif feature == 'GCV-DO':
feature_matrix = GCV[['D','O']]
elif feature == 'GCV-all':
feature_matrix = GCV.drop(('G','0-0'), axis=1)
elif feature == 'GCV-DG-2':
feature_matrix = GCV[['D','G']].drop(('G','2-1'), axis=1)
elif feature == 'GCV-DG-3':
feature_matrix = GCV[['D','G']].drop(('G','3-3'), axis=1)
elif feature == 'GCV-DG-sym':
GCV_G_sym = GCV['G'][['0-0','1-1','3-3']]
feature_matrix = pd.concat([GCV['D'],GCV_G_sym], axis=1)
from pathlib import Path
import os
import re
import pandas as pd
import numpy as np
import random
from math import ceil
import cv2
import glob
import shutil
import experiment_code.constants as consts
from experiment_code.targetfile_utils import Utils
# import experiment_code.targetfile_utils as utils
# create instances of directories
class VisualSearch(Utils):
"""
This class makes target files for Visual Search using parameters set in __init__
Args:
task_name (str): 'visual_search'
orientations (int): orientations of target/distractor stims
balance_blocks (dict): keys are 'condition_name', 'trial_type'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'visual_search'
self.orientations = list([90, 180, 270, 360]) # DO NOT CHANGE ORDER
self.balance_blocks = {'condition_name': {'easy': '4', 'hard': '8'}, 'trial_type': [True, False]}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 2
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'right'
self.replace = False
self.display_trial_feedback = True
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
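# e.g. with the default balance_blocks (2 condition names x 2 trial types) the denominator
# is 4, so num_stims = ceil(num_trials / 4) stimuli are drawn per cell of the design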
def _create_columns(self):
def _get_condition(x):
for key in self.balance_blocks['condition_name'].keys():
cond = self.balance_blocks['condition_name'][key]
if x==cond:
value = key
return value
dataframe = pd.DataFrame()
# make `condition_name` column
conds = [self.balance_blocks['condition_name'][key] for key in self.balance_blocks['condition_name'].keys()]
# conds = [self.balance_blocks['condition_name']['easy'], self.balance_blocks['condition_name']['hard']]
dataframe['stim'] = self.num_trials*conds
dataframe['condition_name'] = dataframe['stim'].apply(lambda x: _get_condition(x))
dataframe['stim'] = dataframe['stim'].astype(int)
# make `trial_type` column
dataframe['trial_type'] = self.num_trials*self.balance_blocks['trial_type']
dataframe['trial_type'] = dataframe['trial_type'].sort_values().reset_index(drop=True)
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=True)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
return dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
def _save_visual_display(self, dataframe):
# add visual display cols
display_pos, orientations_correct = zip(*[self._make_search_display(cond, self.orientations, trial_type) for (cond, trial_type) in zip(dataframe["stim"], dataframe["trial_type"])])
data_dicts = []
for trial_idx, trial_conditions in enumerate(display_pos):
for condition, point in trial_conditions.items():
data_dicts.append({'trial': trial_idx, 'stim': condition, 'xpos': point[0], 'ypos': point[1], 'orientation': orientations_correct[trial_idx][condition]})
# save out to dataframe
df_display = pd.DataFrame.from_records(data_dicts)
# save out visual display
visual_display_name = self._get_visual_display_name()
df_display.to_csv(os.path.join(self.target_dir, visual_display_name))
def _get_visual_display_name(self):
tf_name = f"{self.task_name}_{self.block_dur_secs}sec"
tf_name = self._get_target_file_name(tf_name)
str_part = tf_name.partition(self.task_name)
visual_display_name = 'display_pos' + str_part[2]
return visual_display_name
def _make_search_display(self, display_size, orientations, trial_type):
# make location and orientations lists (for target and distractor items)
# STIM POSITIONS
grid_h_dva = 8.4
grid_v_dva = 11.7
n_h_items = 6
n_v_items = 8
item_h_pos = np.linspace(-grid_h_dva / 2.0, +grid_h_dva/ 2.0, n_h_items)
item_v_pos = np.linspace(-grid_v_dva / 2.0, +grid_v_dva / 2.0, n_v_items)
grid_pos = []
for curr_h_pos in item_h_pos:
for curr_v_pos in item_v_pos:
grid_pos.append([curr_h_pos, curr_v_pos])
locations = random.sample(grid_pos, display_size)
## STIM ORIENTATIONS
orientations_list = orientations*int(display_size/4)
# if trial type is false - randomly replace target stim (90)
# with a distractor
if not trial_type:
orientations_list = [random.sample(orientations[1:],1)[0] if x==90 else x for x in orientations_list]
# if trial is true and larger than 4, leave one target stim (90) in list
# and randomly replace the others with distractor stims
if display_size >4 and trial_type:
indices = [i for i, x in enumerate(orientations_list) if x == 90]
indices.pop(0)
new_num = random.sample(orientations[1:],2) # always assumes that orientations_list is as follows: [90,180,270,360]
for i, n in zip(*(indices, new_num)):
orientations_list[i] = n
return dict(enumerate(locations)), dict(enumerate(orientations_list))
def make_targetfile(self, **kwargs):
"""
makes target file(s) for visual search given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
seeds = np.arange(self.num_blocks)+1
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# create the dataframe
df_target = self._create_columns()
# balance the dataframe
df_target = self._balance_design(dataframe = df_target)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
# save visual display dataframe
self._save_visual_display(dataframe = df_target)
# save target file
self._save_target_files(df_target)
class NBack(Utils):
"""
This class makes target files for N Back using parameters set in __init__
Args:
task_name (str): 'n_back'
n_back (int): default is 2
balance_blocks (dict): keys are 'condition_name'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'n_back'
self.n_back = 2
self.balance_blocks = {'condition_name': {'easy': '2_back-', 'hard': '2_back+'}}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 1.5
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'left'
self.replace = False
self.display_trial_feedback = True
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def _create_columns(self):
def _get_condition(x):
for key in self.balance_blocks['condition_name'].keys():
cond = self.balance_blocks['condition_name'][key]
if x==cond:
value = key
return value
# make trial_type column
dataframe = pd.DataFrame()
dataframe['trial_type'] = self.num_stims*(True, False)
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
dataframe.loc[:self.n_back - 1, 'trial_type'] = False # the first n_back trials cannot be a match, so they are forced to False
# make `n_back` and `condition_name` cols
conds = [self.balance_blocks['condition_name'][key] for key in self.balance_blocks['condition_name'].keys()]
dataframe['n_back'] = np.where(dataframe["trial_type"]==False, conds[0], conds[1])
dataframe['condition_name'] = dataframe['n_back'].apply(lambda x: _get_condition(x))
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# load in stimuli
stim_files = [f for f in os.listdir(str(consts.stim_dir / self.task_name)) if f.endswith('g')]
# first two images are always random (and false)
# all other images are either match or not a match
random.seed(self.random_state)
stim_list = random.sample(stim_files, k=self.n_back)
for t in dataframe['trial_type'][self.n_back:]: # loop over n+self.n_back
match_img = stim_list[-self.n_back]
no_match_imgs = [stim for stim in stim_files if stim != match_img] # was match_img[0]
if t == False: # not a match
random.seed(self.random_state)
stim_list.append(random.sample(no_match_imgs, k=self.n_back-1))
else: # match
stim_list.append(match_img)
dataframe["stim"] = [''.join(x) for x in stim_list]
return dataframe
def make_targetfile(self, **kwargs):
"""
makes target file(s) for n back given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
seeds = np.arange(self.num_blocks)+1
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (2Back- and 2Back+) are equally represented
self.random_state = seeds[self.block]
# create the dataframe
df_target = self._create_columns()
# balance the dataframe
df_target = self._balance_design(dataframe = df_target)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class SocialPrediction(Utils):
"""
This class makes target files for Social Prediction using parameters set in __init__
Args:
task_name (str): 'social_prediction'
dataset_name (str): 'homevideos' is the default
logging_file (str): csv file containing info about stimuli
video_name (list of str): name of video(s) to include
resized (bool): resize frames of video
balance_blocks (dict): keys are 'actors', 'condition_name', 'label'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'social_prediction'
self.dataset_name = 'homevideos'
self.logging_file = 'homevideos_annotations_logging.csv'
self.video_name = ['dynamic_0ms', 'dynamic_100ms']
self.resized = True
self.balance_blocks = {'actors': ['SB', 'MK'],
'condition_name': {'dynamic_0ms': 'easy', 'dynamic_100ms': 'hard'},
'label': ['hug', 'handShake']}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 2.5
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'right'
self.replace = False
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
# remove all filenames where any of the videos have not been extracted
stims_to_remove = dataframe.query('extracted==False')["video_name"].to_list()
df_filtered = dataframe[~dataframe["video_name"].isin(stims_to_remove)]
# query rows with relevant videos and relevant labels
label = self.balance_blocks['label']
actors = self.balance_blocks['actors']
df_filtered = df_filtered.query(f'condition_name=={self.video_name} and label=={label} and actors=={actors}')
return df_filtered
def _create_new_columns(self, dataframe):
# make new `stim`
if self.resized:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '_resized' + '.mp4'
else:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '.mp4'
# set `condition name`
dataframe['condition_name'] = dataframe['condition_name'].apply(lambda x: self.balance_blocks['condition_name'][x])
# assign dataset name
dataframe['dataset'] = self.dataset_name
# assign trial type (only works currently for two labels)
labels = self.balance_blocks['label']
if len(labels)==2:
dataframe['trial_type'] = dataframe['label'].apply(lambda x: True if x==labels[0] else False)
else:
print('there is an incorrect number of labels; there should be exactly two')
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for social prediction given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`trial_type` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class SemanticPrediction(Utils):
"""
This class makes target files for Semantic Prediction using parameters set in __init__
Args:
task_name (str): 'semantic_prediction'
logging_file (str): csv file containing info about stimuli
stem_word_dur (int): length of stem word (sec)
last_word_dur (int): length of last word (sec)
frac (int): proportion of meaningless trials. default is .3.
balance_blocks (dict): keys are 'CoRT_descript', 'condition_name'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'semantic_prediction'
self.logging_file = 'sentence_validation.csv'
self.stem_word_dur = 0.5
self.last_word_dur = 1.5
self.frac = .3
self.balance_blocks = {'CoRT_descript': ['strong non-CoRT', 'strong CoRT'],
'condition_name': {'high cloze': 'easy', 'low cloze': 'hard'}}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 7
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'right'
self.replace = False
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
# conds = [self.balance_blocks['condition_name'][key] for key in self.balance_blocks['condition_name'].keys()]
conds = list(self.balance_blocks['condition_name'].keys())
dataframe = dataframe.query(f'CoRT_descript=={self.balance_blocks["CoRT_descript"]} and cloze_descript=={conds}')
# strip erroneous characters from sentences
dataframe['stim'] = dataframe['full_sentence'].str.replace('|', ' ')
return dataframe
def _create_new_columns(self, dataframe):
# add condition column
dataframe['condition_name'] = dataframe['cloze_descript'].apply(lambda x: self.balance_blocks['condition_name'][x])
dataframe['stem_word_dur'] = self.stem_word_dur
dataframe['last_word_dur'] = self.last_word_dur
dataframe['trial_dur_correct'] = (dataframe['word_count'] * dataframe['stem_word_dur']) + self.iti_dur + dataframe['last_word_dur']
dataframe['display_trial_feedback'] = self.display_trial_feedback
dataframe.drop({'full_sentence'}, inplace=True, axis=1)
return dataframe
def _add_random_word(self, dataframe, columns):
""" sample `frac_random` and add to `full_sentence`
Args:
dataframe (pandas dataframe): dataframe
Returns:
dataframe with modified `full_sentence` col
"""
idx = dataframe.groupby(columns).apply(lambda x: x.sample(frac=self.frac, replace=False, random_state=self.random_state)).index
sampidx = idx.get_level_values(len(columns)) # the last level holds the original row index
dataframe["trial_type"] = ~dataframe.index.isin(sampidx)
dataframe["last_word"] = dataframe.apply(lambda x: x["random_word"] if not x["trial_type"] else x["target_word"], axis=1)
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for semantic prediction given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`condition_name` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
# add random word based on `self.frac`
df_target = self._add_random_word(dataframe=df_target, columns=['condition_name']) # 'CoRT_descript'
# save out target files
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class ActionObservation(Utils):
"""
This class makes target files for Action Observation using parameters set in __init__
Args:
task_name (str): 'rest'
logging_file (str): csv file containing info about stimuli
video_name (list of str): name of video(s) to include
manipulation (str): 'left_right' or 'miss_goal'
resized (bool): resize frames of video
balance_blocks (dict): keys are 'player_name', 'condition_name', 'trial_type'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = "action_observation"
self.logging_file = 'all_clips_annotation_logging.csv'
self.video_name = ['dynamic_120ms']
self.manipulation = 'left_right'
self.resized = True
self.balance_blocks = {'player_name': ['DC', 'EW'], 'condition_name': ['easy', 'hard'], 'trial_type': ['left', 'right']}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 2
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'left'
self.replace = True # sample with or without replacement
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
def _get_player(x):
if x.find('DC')>=0:
player_name = 'DC'
elif x.find('FI')>=0:
player_name = 'FI'
elif x.find('EW')>=0:
player_name = 'EW'
else:
print('player does not exist')
return player_name
# remove all filenames where any of the videos have not been extracted
# and where the player did not accurately hit the target (success=F)
stims_to_remove = dataframe.query('extracted==False or player_success=="?"')["video_name"].to_list()
df_filtered = dataframe[~dataframe["video_name"].isin(stims_to_remove)]
# remove rows without video info
df_filtered = df_filtered.query(f'condition_name=={self.video_name}')
# create `player_name`
df_filtered['player_name'] = df_filtered['video_name'].apply(lambda x: _get_player(x))
# filter `player_name`
cond = self.balance_blocks['player_name']
df_filtered = df_filtered.query(f'player_name=={cond}')
# figure out the actual hits. certain trials (~14%) were misses. enter the actual hit.
df_filtered.loc[df_filtered['hit_target'].isnull(), 'hit_target'] = df_filtered['instructed_target']
return df_filtered
def _create_new_columns(self, dataframe):
def _get_condition(x):
if self.manipulation=="miss_goal":
easy = [1,2,7,8,9,10,15,16]
hard = [3,4,5,6,11,12,13,14]
elif self.manipulation=="left_right":
easy = [1,2,3,4,13,14,15,16]
hard = [5,6,7,8,9,10,11,12]
else:
print('manipulation does not exist')
if x in easy:
condition = "easy"
elif x in hard:
condition = "hard"
else:
condition = float("NaN")
print(f'{x} not in list')
return condition
def _get_trial_type(x):
if self.manipulation=="miss_goal":
list1= [5,6,7,8,9,10,11,12]
list2 = [1,2,3,4,13,14,15,16]
value1 = "goal"
value2 = "miss"
elif self.manipulation=="left_right":
list1 = [1,2,3,4,5,6,7,8]
list2 = [9,10,11,12,13,14,15,16]
value1 = True # 'right'
value2 = False # 'left'
else:
print('manipulation does not exist')
if x in list1:
trial = value1
elif x in list2:
trial = value2
else:
trial = float("NaN")
print(f'{x} not in list')
return trial
# make new image column
if self.resized:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '_resized' + '.mp4'
else:
dataframe['stim'] = dataframe['video_name'] + '_' + dataframe['condition_name'] + '.mp4'
# divide targets between easy and hard
dataframe['condition_name'] = dataframe['hit_target'].apply(lambda x: _get_condition(x))
# either miss_goal or left_right based on manipulation
dataframe['trial_type'] = dataframe['hit_target'].apply(lambda x: _get_trial_type(x))
# get time of extraction for video (round to two decimals)
dataframe['video_start_time'] = np.round(dataframe['interact_start'] - dataframe['secs_before_interact'], 2)
dataframe['video_end_time'] = np.round(dataframe['interact_start'] + dataframe['secs_after_interact'], 2)
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for action observation given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`trial_type` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class TheoryOfMind(Utils):
"""
This class makes target files for Theory of Mind using parameters set in __init__
Args:
task_name (str): 'theory_of_mind'
logging_file (str): csv file containing info about stimuli
story_dur (int): length of story (sec)
question_dur (int): length of question (sec)
frac (int): proportion of meaningless trials. default is .3.
balance_blocks (dict): keys are 'condition_name'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'theory_of_mind'
self.logging_file = 'theory_of_mind.csv'
self.story_dur = 10
self.question_dur = 4
self.frac = .3
self.balance_blocks = {'condition_name': ['belief','photo'],'trial_type': [True, False]}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 14
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'left'
self.replace = False
self.display_trial_feedback = True
def _filter_dataframe(self, dataframe):
dataframe = dataframe.query(f'condition=={self.balance_blocks["condition_name"]} and response=={self.balance_blocks["trial_type"]}')
return dataframe
def _create_new_columns(self, dataframe):
# add condition column
# dataframe['condition_name'] = dataframe['condition'].apply(lambda x: self.balance_blocks['condition_name'][x])
dataframe['condition_name'] = dataframe['condition']
dataframe['story_dur'] = self.story_dur
dataframe['question_dur'] = self.question_dur
dataframe['trial_dur_correct'] = dataframe['story_dur'] + self.iti_dur + dataframe['question_dur']
dataframe['display_trial_feedback'] = self.display_trial_feedback
responses = self.balance_blocks['trial_type']
dataframe['trial_type'] = dataframe['response'].apply(lambda x: True if x==responses[0] else False)
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for theory of mind given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`condition_name` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
# save out target files
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class VerbGeneration(Utils):
"""
This class makes target files for Verb Generation using parameters set in __init__
Args:
task_name (str): 'verb_generation'
logging_file (str): csv file containing info about stimuli
frac (float): proportion of meaningless trials. default is .3.
balance_blocks (dict): keys are 'session_list'
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'verb_generation'
self.logging_file = 'verb_generation.csv'
self.frac = .3
self.balance_blocks = {'session_list': ['1','2']}
self.block_dur_secs = 15
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 1.6
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'None'
self.replace = False
self.display_trial_feedback = False
def _filter_dataframe(self, dataframe):
dataframe = dataframe.query(f'session_list=={self.balance_blocks["session_list"]}')
return dataframe
def _create_new_columns(self, dataframe):
# add condition column
dataframe['session_list'] = dataframe['session_list']
dataframe['trial_dur'] = self.trial_dur
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for verb generation given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# return logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`condition_name` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that sessions are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how ='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
# save out target files
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class RomanceMovie(Utils):
"""
This class makes target files for Romance Movie using parameters set in __init__
Args:
task_name (str): 'romance_movie'
logging_file (str): csv file containing info about stimuli
video_name (list of str): name of video(s) to include
resized (bool): resize frames of video
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
"""
def __init__(self):
super().__init__()
self.task_name = 'romance_movie'
self.logging_file = 'romance_movie_logging.csv'
self.video_name = ['romance']
self.balance_blocks = {'condition_name': ['romance']}
self.block_dur_secs = 30
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 20
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'None'
self.replace = False
self.display_trial_feedback = False
def _filter_dataframe(self, dataframe):
# remove all filenames where any of the videos have not been extracted
stims_to_remove = dataframe.query('extracted==False')["video_name"].to_list()
df_filtered = dataframe[~dataframe["video_name"].isin(stims_to_remove)]
return df_filtered
def _create_new_columns(self, dataframe):
# make new `stim`
dataframe['stim'] = dataframe['video_name'] + '.mov'
# set `condition name`
dataframe['condition_name'] = dataframe['condition_name']
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for romance movie given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# build the path to the logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`trial_type` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class ActionObservationKnots(Utils):
"""
This class makes target files for Action Observation Knots using parameters set in __init__
Args:
task_name (str): 'act_obs_knots'
logging_file (str): csv file containing info about stimuli
video_name (list of str): name of video(s) to include
resized (bool): resize frames of video
block_dur_secs (int): length of task_name (sec)
num_blocks (int): number of blocks to make
tile_block (int): determines number of repeats for task_name
trial_dur (int): length of trial (sec)
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
replace (bool): sample stim with or without replacement
display_trial_feedback (bool): display trial-by-trial feedback
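Example (illustrative sketch; one target csv per block is written to consts.target_dir/act_obs_knots):
    block = ActionObservationKnots()
    block.make_targetfile(block_dur_secs=30, num_blocks=5, tile_block=1)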
"""
def __init__(self):
super().__init__()
self.task_name = 'act_obs_knots'
self.logging_file = 'action_observation_knots_logging.csv'
self.video_name_action = ['knotAction']
self.video_name_control = ['knotControl']
self.balance_blocks = {'condition_name': ['knot'], 'session_list': [1,2]}
self.block_dur_secs = 30
self.num_blocks = 5
self.tile_block = 1
self.trial_dur = 15
self.iti_dur = .5
self.instruct_dur = 5
self.hand = 'None'
self.replace = False
self.display_trial_feedback = False
def _filter_dataframe(self, dataframe):
# remove all filenames where any of the videos have not been extracted
stims_to_remove = dataframe.query('extracted==False')["video_name_action"].to_list()
df_filtered = dataframe[~dataframe["video_name_action"].isin(stims_to_remove)]
return df_filtered
def _create_new_columns(self, dataframe):
# make new `stim`
dataframe['stim_action'] = dataframe['video_name_action'] + '.mov'
dataframe['stim_control'] = dataframe['video_name_control'] + '.mov'
dataframe['session_list'] = dataframe['session_list']
# set `condition name`
dataframe['condition_name'] = dataframe['condition_name']
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def _balance_design(self, dataframe):
# group the dataframe according to `balance_blocks`
dataframe = dataframe.groupby([*self.balance_blocks], as_index=False).apply(lambda x: x.sample(n=self.num_stims, random_state=self.random_state, replace=self.replace)).reset_index(drop=True)
# ensure that only `num_trials` are sampled
dataframe = dataframe.sample(n=self.num_trials, random_state=self.random_state, replace=False).reset_index(drop=True)
return dataframe
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('block_dur_secs'):
self.block_dur_secs = kwargs['block_dur_secs']
# repeat the target files
if kwargs.get('tile_block'):
self.tile_block = kwargs['tile_block']
# num of blocks (i.e. target files) to make
if kwargs.get('num_blocks'):
self.num_blocks = kwargs['num_blocks'] * self.tile_block
# get overall number of trials
self.num_trials = int(self.block_dur_secs / (self.trial_dur + self.iti_dur))
# get `num_stims` - lowest denominator across `balance_blocks`
denominator = np.prod([len(stim) for stim in [*self.balance_blocks.values()]])
self.num_stims = ceil(self.num_trials / denominator) # round up to nearest int
def make_targetfile(self, **kwargs):
"""
makes target file(s) for action observation knots given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# build the path to the logging file
fpath = os.path.join(consts.stim_dir, self.task_name, self.logging_file)
# read in stimulus dataframe
df = pd.read_csv(fpath)
# filter dataframe
df_filtered = self._filter_dataframe(dataframe = df)
# create new columns (`trial_type` etc)
df_filtered = self._create_new_columns(dataframe = df_filtered)
seeds = np.arange(self.num_blocks)+1
# for self.block, self.key in enumerate(self.block_design):
for self.block in np.arange(self.num_blocks):
# randomly sample so that conditions (easy and hard) are equally represented
self.random_state = seeds[self.block]
# balance the dataframe by `condition_name` and `player_num`
df_target = self._balance_design(dataframe = df_filtered)
# remove `df_target` rows from the main dataframe so that we're always sampling from unique rows
if self.replace==False:
df_filtered = df_filtered.merge(df_target, how='left', indicator=True)
df_filtered = df_filtered[df_filtered['_merge'] == 'left_only'].drop('_merge', axis=1)
self.target_dir = os.path.join(consts.target_dir, self.task_name)
self._save_target_files(df_target)
class Rest(Utils):
"""
This class makes target files for Rest using parameters set in __init__
Args:
task_name (str): 'rest'
rest_dur_secs (int): length of rest (sec), 0 if no rest
iti_dur (iti): length of iti (sec)
instruct_dur (int): length of instruct for task_names (sec)
hand (str): response hand
num_trials (int): number of trials per block. default is 1
display_trial_feedback (bool): display trial-by-trial feedback
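Example (illustrative sketch; writes 'rest_10sec.csv' into consts.target_dir/rest):
    Rest().make_targetfile(rest_dur_secs=10)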
"""
def __init__(self):
super().__init__()
self.task_name = 'rest'
self.rest_dur_secs = 10
self.iti_dur = 0
self.instruct_dur = 0
self.hand = "None"
self.num_trials = 1
self.display_trial_feedback = False
def _get_block_info(self, **kwargs):
# length (in secs) of the block
if kwargs.get('rest_dur_secs'):
self.trial_dur = kwargs['rest_dur_secs']
else:
self.trial_dur = self.rest_dur_secs
def _create_new_columns(self):
start_time = np.round(np.arange(0, self.num_trials*(self.trial_dur+self.iti_dur), self.trial_dur+self.iti_dur), 1)
data = {"stim": 'fixation', "trial_dur":self.trial_dur, "iti_dur":self.iti_dur, "start_time":start_time, "hand": self.hand}
dataframe = pd.DataFrame.from_records(data)
dataframe['display_trial_feedback'] = self.display_trial_feedback
return dataframe
def make_targetfile(self, **kwargs):
"""
makes target file(s) for rest given parameters in __init__
"""
# get info about block
self._get_block_info(**kwargs)
# save target file
self.target_name = self.task_name + '_' + str(self.rest_dur_secs) + 'sec.csv'
# create dataframe
dataframe = self._create_new_columns()
target_dir = os.path.join(consts.target_dir, self.task_name)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
dataframe.to_csv(os.path.join(target_dir, self.target_name), index = False, header = True)
return self.target_name
class MakeFiles:
"""
This class makes run and target files using parameters set in __init__
Args:
task_names (list of str): options are 'visual_search', 'n_back', 'social_prediction', 'semantic_prediction', 'action_observation'
feedback_types (list of str): options are 'rt' and 'acc'
run_name_prefix (str): prefix of run name
tile_run (int): determines number of block repeats within a run
instruct_dur (int): length of instruct for task_names (sec)
block_dur_secs (int): length of task_name (sec)
rest_dur_secs (int): length of rest (sec), 0 if no rest
num_runs (int): number of runs
counterbalance_runs (bool): counterbalance block order across runs
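Example (illustrative sketch; only attributes set in __init__ are shown here,
the run-building methods follow below):
    mf = MakeFiles()
    mf.num_runs = 8
    mf.counterbalance_runs = False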
"""
def __init__(self):
self.task_names = ['visual_search', 'theory_of_mind', 'n_back', 'social_prediction', 'semantic_prediction', 'action_observation', 'verb_generation', 'romance_movie', 'act_obs_knots']
self.feedback_types = ['rt', 'acc', 'rt', 'acc', 'rt', 'acc', 'None', 'None', 'None']
self.run_name_prefix = 'run'
self.tile_run = 1
self.instruct_dur = 5
self.block_dur_secs = 30
self.rest_dur_secs = 10
self.num_runs = 5
self.counterbalance_runs = True
def _create_run_dataframe(self, target_files):
for iter, target_file in enumerate(target_files):
# load target file
dataframe = pd.read_csv(target_file)
start_time = dataframe.iloc[0]['start_time'] + self.cum_time
end_time = dataframe.iloc[-1]['start_time'] + dataframe.iloc[-1]['trial_dur'] + self.instruct_dur + self.cum_time
target_file_name = Path(target_file).name
num_sec = re.findall(r'\d+(?=sec)', target_file)[0]
target_num = re.findall(r'\d+(?=.csv)', target_file)[0]
num_trials = len(dataframe)
data = {'task_name': self.task_name, 'task_num': self.task_num+1, # 'block_iter': iter+1
'num_trials': num_trials, 'target_num': target_num, 'num_sec': num_sec,
'target_file': target_file_name, 'start_time': start_time, 'end_time': end_time,
'instruct_dur': self.instruct_dur, 'feedback_type': self.feedback_type}
self.all_data.append(data)
self.cum_time = end_time
def _save_run_file(self, run_name):
# make dataframe from a dictionary
df_run = pd.DataFrame.from_dict(self.all_data)
# save out to file
df_run.to_csv(os.path.join(consts.run_dir, run_name), index=False, header=True)
def _add_rest(self):
run_files = sorted(glob.glob(os.path.join(consts.run_dir, f'*{self.run_name_prefix}*')))
# make target file
BlockClass = TASK_MAP['rest']
block = BlockClass()
self.target_name = block.make_targetfile(block_dur_secs = self.rest_dur_secs)
for run_file in run_files:
dataframe = pd.read_csv(run_file)
dataframe = self._add_rest_rows(dataframe)
dataframe.to_csv(run_file, index = False, header = True)
def _counterbalance_runs(self):
pass
def _check_task_run(self):
# check if task exists in dict
exists_in_dict = [True for key in self.target_dict.keys() if self.task_name==key]
if not exists_in_dict:
self.target_dict.update({self.task_name: self.fpaths})
# create run dataframe
random.seed(2)
target_files_sample = [self.target_dict[self.task_name].pop(random.randrange(len(self.target_dict[self.task_name]))) for _ in np.arange(self.tile_run)]
return target_files_sample
def _insert_row(self, row_number, dataframe, row_value):
# Slice the upper half of the dataframe
df1 = dataframe[0:row_number]
# Store the result of lower half of the dataframe
df2 = dataframe[row_number:]
# Insert the row in the upper half dataframe
df1.loc[row_number]=row_value
# Concat the two dataframes
df_result = pd.concat([df1, df2])
# ---
# jupyter:
# jupytext:
# formats: ipynb,../../tests/notebooks//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %load_ext autoreload
# %autoreload 2
import os
import pickle as pkl
from typing import Dict, Any
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.dpi'] = 250
# change working directory to project root
if os.getcwd().split('/')[-1] == 'notebooks':
os.chdir('../..')
from experiments.notebooks import viz
def get_comparison_result(path: str, estimator_name: str, test=False) -> Dict[str, Any]:
if test:
result_file = path + 'test/' + f'{estimator_name}_test_comparisons.pkl'
else:
result_file = path + 'val/' + f'{estimator_name}_comparisons.pkl'
return pkl.load(open(result_file, 'rb'))
MODEL_COMPARISON_PATH = 'experiments/comparison_data/'
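# Illustrative usage of `get_comparison_result` (hypothetical estimator name;
# assumes the corresponding pickle was produced by the comparison script):
# example_results = get_comparison_result(MODEL_COMPARISON_PATH, 'random_forest', test=False)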
datasets = [
("breast-cancer", 13),
("breast-w", 15),
("credit-g", 31),
("haberman", 43),
("heart", 1574),
("labor", 4),
("vote", 56),
]
# %% [markdown]
# # dataset stats
# %%
metadata = []
columns = ['name', 'samples', 'features', 'class 0 ct', 'class 1 ct', 'majority class %']
for dataset_name, data_id in datasets:
dataset = fetch_openml(data_id=data_id, as_frame=False)
shape = dataset.data.shape
class_counts = np.unique(dataset.target, return_counts=True)[1]
metadata.append([dataset_name, shape[0], shape[1], class_counts[0], class_counts[1], np.max(class_counts) / np.sum(class_counts)])
pd.DataFrame(metadata, columns=columns)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301')])
from cdo_api_py import Client
import pandas as pd
from datetime import datetime
from pprint import pprint
# initialize a client with a developer token ,
# note 5 calls per second and 1000 calls per day limit for each token
token = "my token <PASSWORD>!"
my_client = Client(token, default_units=None, default_limit=1000)
# the other valid option for units is 'standard', and default_limit maxes out at 1000
# first lets see what endpoints are associated with the API
# you can read more about this from NOAAs NCDC at
# https://www.ncdc.noaa.gov/cdo-web/webservices/v2#gettingStarted
pprint(my_client.list_endpoints())
# request a list of available datasets (about 11) with
pprint(my_client.list_datasets())
# there are more than 1000 datatypes, but you can see them all with
pprint(my_client.list_datatypes())
# define the extent we are interested in. in this case the DC metro area.
extent = {
"north": 39.14,
"south": 38.68,
"east": -76.65,
"west": -77.35,
}
# lets define the date range we're interested in as well, December 2016
startdate = datetime(2016, 12, 1)
enddate = datetime(2016, 12, 31)
# after examining the available datasets, we decided 'GHCND' is the one we want,
# and that we really want daily min and max temperatures
datasetid='GHCND'
datatypeid=['TMIN', 'TMAX', 'PRCP']
# lets find stations that meet all our criteria
stations = my_client.find_stations(
datasetid=datasetid,
extent=extent,
startdate=startdate,
enddate=enddate,
datatypeid=datatypeid,
return_dataframe=True)
pprint(stations)
# we can get big lists of station data with
big_df = pd.DataFrame()
for rowid, station in stations.iterrows(): # remember this is a pandas dataframe!
station_data = my_client.get_data_by_station(
datasetid=datasetid,
stationid=station['id'], # remember this is a pandas dataframe
startdate=startdate,
enddate=enddate,
return_dataframe=True, # this defaults to True
include_station_meta=True # flatten station metadata with ghcnd readings
)
if isinstance(station_data, list):
continue
pprint(station_data)
big_df = pd.concat([big_df, station_data], sort=False)
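# Possible follow-up (illustrative; the output filename is arbitrary):
# big_df.to_csv('dc_ghcnd_dec2016.csv', index=False)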
#!/usr/bin/env python
"""
Pseudo-bulk profile computation
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2021"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import Catactor
from .mini_catactor import run_mini_catactor
import argparse
import pickle
import scanpy as sc
import numpy as np
from scipy import sparse
from sklearn.preprocessing import binarize
import argparse
import pickle
import matplotlib.pyplot as plt
import os
import pandas as pd
import collections
import re
def genome_binning(column_ann,
min_bin=1000, max_bin=10000, step_bin=1000,
bin_prefix='global_index',
verbose=True):
if 'chr' not in column_ann.columns or \
'start' not in column_ann.columns or \
'end' not in column_ann.columns:
return column_ann
column_ann = column_ann.sort_values(by=['chr', 'start'])
if verbose:
print('-- Binning with different sizes', min_bin, max_bin, step_bin)
index_list = [x.replace(bin_prefix, '').lstrip('_') for x in column_ann.columns if bin_prefix in x]
min_data_bin = min([int(i) if i != '' else step_bin for i in index_list])
min_step = max(min_data_bin, step_bin)
column_ann = column_ann.loc[~pd.isna(column_ann.loc[:,'start']),:]
start_index = column_ann.loc[:,"start"]
peak_flag = False
if (start_index%10 != 0).any() and ((start_index-1)%10 != 0).any(): # peak data inference
peak_flag = True
start_index = (column_ann["end"]+column_ann["start"])/2
if verbose:
print('-- Construct', 'peak_flag=', peak_flag)
if len([x for x in index_list if x != '']) > 0:
print('Columns already contain :', index_list)
for step in range(max(min_bin, min_step), max_bin+min_step, min_step):
if verbose: print(' --- Step:', step)
if bin_prefix+'_'+str(step) in column_ann.columns:
column_ann = column_ann.drop(columns=bin_prefix+'_'+str(step))
column_ann.loc[:, "chr_index"] = np.floor(start_index/step).astype(int).values
candidates = pd.DataFrame({'chr':column_ann.loc[:,"chr"].values, "chr_index":column_ann.loc[:,"chr_index"].values})
candidates.drop_duplicates(keep='first', inplace=True)
candidates.loc[:, bin_prefix+'_'+str(step)] = list(range(0, candidates.shape[0]))
column_ann = column_ann.merge(candidates, how='left', on=["chr", "chr_index"])
column_ann = column_ann.drop(columns=['chr_index'])
return column_ann
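# Illustrative sketch of genome_binning on a tiny hypothetical annotation frame
# (the function expects at least one existing `bin_prefix` column, here the bare
# 'global_index', from which it infers the smallest pre-existing bin size):
#   ann = pd.DataFrame({'chr': ['chr1', 'chr1'], 'start': [100, 5100],
#                       'end': [600, 5600], 'global_index': [0, 1]})
#   ann = genome_binning(ann, min_bin=5000, max_bin=5000, step_bin=5000)
#   # adds a 'global_index_5000' column assigning each region to its 5 kb bin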
def compute_column_conversion_matrix(column_max, gene_file, projection_mat, projection_ann):
new_column_ann = pd.read_csv(re.sub('_with_bins.*.csv', projection_ann, gene_file), index_col=0, sep=' ', low_memory=False)
duplicated_columns = [x for x in new_column_ann.columns if new_column_ann.index.name in x]
if len(duplicated_columns) > 0: # to solve the problem of duplicated column names
new_column_ann = new_column_ann.rename(columns={duplicated_columns[0]:new_column_ann.index.name})
new_column_ann.index.name = ''
# original = column_ann.index.array.astype(float).astype(int)
projection, row, column, data = self.read_any_matrix(self.args['adir'], re.sub('_with_bins_annot.csv', projection_mat, gene_file), 2)
assert row <= column_max
if row < column_max:
projection.resize((column_max, column))
return projection, new_column_ann
else:
return projection, new_column_ann
def __extract_and_fill_unique_column(data, target, max_index):
otarget = None
if target == '' or data.index.name == target:
# copy the original index and sort by index
target = data.index.name
kwargs = {target: data.index.array}
data = data.assign(**kwargs)
data.index.name, otarget = 'raw_index', 'raw_index'
assert target in data.columns
data = data.loc[~data.index.isnull(),:]
g = data.groupby([target])
df = data.set_index(target)
df = df.loc[~df.index.isnull(),:]
index = list(set(df.index.map(lambda ind: g.indices[ind][0]).tolist()))
df = data.iloc[index,:]
df = df.sort_values(by=target)
if df.shape[0] < max_index:
removed = [i for i in range(max_index) if i not in df.loc[:, target].values]
assert len(removed)+df.shape[0] == max_index
df = df.append(pd.DataFrame({target:removed}), sort=False)
df = df.sort_values(by=target)
df = df.set_index(target)
df.index.name = target
if otarget is not None and otarget in df.columns:
df = df.drop(columns=otarget)
return df
def __compute_column_conversion(column_ann, column_max, gene_group, gene_original, projected=None):
if projected is None:
if gene_group == '':
projected = column_ann.index.array.astype(float).astype(int)
else:
projected = column_ann[gene_group].values.astype(float).astype(int)
if gene_original == '':
original = column_ann.index.array.astype(float).astype(int)
else:
original = column_ann.index.array.astype(float).astype(int)
return sparse.csr_matrix((np.ones(shape=projected.shape[0]), (original, projected)), shape=(column_max, max(projected)+1))
def __remove_nan_from_conversion(projected, original):
projected, original = zip(*[(x, y) for x, y in zip(projected, original) if np.isfinite(x) and np.isfinite(y)])
projected = np.array(projected).astype(int)
original = np.array(original).astype(int)
return projected, original
def __compute_row_conversion(row_ann, row_max, cell_group, cell_original, verbose=True):
if cell_group == '':
projected = row_ann.index.array.astype(float)
else:
projected = row_ann[cell_group].values
if cell_original == '':
original = row_ann.index.array.astype(float)
else:
original = row_ann.loc[:, cell_original].array.astype(float)
projected, original = __remove_nan_from_conversion(projected, original)
if verbose: print('Mapped groups:', len(set(projected)), list(set(projected))[0:20])
return sparse.csr_matrix((np.ones(shape=projected.shape[0]), (projected, original)), shape=(max(projected)+1, row_max))
def convert_row_and_column(adata, gene_group, cell_group, gene_original, cell_original, binary=False):
mat = adata.X
if gene_group != gene_original:
conv_mat = __compute_column_conversion(adata.var, adata.shape[1], gene_group, gene_original)
mat = mat.dot(conv_mat)
if cell_group != cell_original:
conv_mat = __compute_row_conversion(adata.obs, adata.shape[0], cell_group, cell_original)
mat = conv_mat.dot(mat)
rdf = __extract_and_fill_unique_column(adata.obs, cell_group, mat.shape[0])
cdf = __extract_and_fill_unique_column(adata.var, gene_group, mat.shape[1])
if binary:
mat = binarize(mat, threshold=1).astype(int)
new_adata = sc.AnnData(mat, var=cdf, obs=rdf)
sc.pp.filter_cells(new_adata, min_counts=1, inplace=True)
return new_adata
def __average_profiling(mX, mX_index, all_cells, output, color=''):
mX.to_csv(output+'.csv')
if color != '':
all_cells.obs.loc[:,color].value_counts().to_csv(output+'_count.csv')
all_cells.obs.loc[~pd.isnull(all_cells.obs[color]),:].drop_duplicates(subset=color, keep='first').to_csv(output+'_obs.csv')
assert mX.shape[0] <= all_cells.obs.loc[all_cells.obs.loc[:,color] == all_cells.obs.loc[:,color],:].drop_duplicates(subset=color, keep='first').shape[0]
all_cells.var.to_csv(output+'_var.csv')
assert mX.shape[1] == all_cells.var.shape[0]
def __write_average_profiles(mX, mX_index, adata, output, cluster):
mX.to_csv(output+'.csv')
if cluster != '':
adata.obs.loc[:, cluster].value_counts().to_csv(output+'_count.csv')
adata.obs.loc[~pd.isnull(adata.obs[cluster]),:].drop_duplicates(subset=cluster, keep='first').to_csv(output+'_obs.csv')
assert mX.shape[0] <= adata.obs.loc[adata.obs.loc[:, cluster] == adata.obs.loc[:, cluster],:].drop_duplicates(subset=cluster, keep='first').shape[0]
adata.var.to_csv(output+'_var.csv')
assert mX.shape[1] == adata.var.shape[0]
def compute_each_cell_signals(self, adata, markers, max_gene=100, mode=['']):
global MAX_GENE, MODE
for mode in MODE:
names = [mode+'_'+key for key in markers]
for key, name in zip(markers, names):
if mode == 'average':
scaled = self.compute_agg_exp_of_markers(adata, markers[key][0:MAX_GENE], mode)
elif mode == 'rankmean':
scaled = self.compute_agg_exp_of_markers(adata, markers[key][0:MAX_GENE], mode)
else:
scaled = self.compute_agg_exp_of_markers(adata, markers[key], mode)
adata.obs[name] = scaled
return adata
def __extract_reference_signal(reference, markers, mode='average'):
if '.csv' in reference:
exp_cells = pd.read_csv(reference)
else:
with open(reference, 'rb') as f:
exp_cell_data = pickle.load(f)
exp_cell_data = run_mini_catactor(exp_cell_data, markers, output_ext='', mode=mode)
exp_cells = exp_cell_data.obs
if str(exp_cells.index[0]).isdigit():
exp_cells.index = ['cell_'+str(x) for x in exp_cells.index]
return exp_cells
def average_for_top_signal_cells(adata, reference, markers, top_cells):
exp_cells = __extract_reference_signal(reference, markers)
adata = adata[adata.obs_names.isin(exp_cells.index),:]
for signal in exp_cells.columns:
if re.match(r'^(average|rankmean)_*', signal) is None:
continue
signal_vec = exp_cells.loc[:,signal].values
order = np.argsort(signal_vec)
zero = np.where(signal_vec == 0)[0]
for i, x in enumerate(order):
if x in zero: continue
order = order[i:len(order)]
break
up, bottom = order[::-1][0:top_cells], order[0:top_cells]
vec = pd.Series(['other' for i in range(all_cells.shape[0])], index=all_cells.obs.index)
vec.iloc[up] = 'top'
vec.iloc[bottom] = 'bottom'
vec.iloc[zero] = 'zero'
vec.name = signal
all_cells.obs = pd.concat((all_cells.obs, vec), axis=1)
print('Compute average profiles', signal)
mX, mX_index = average_for_each_cluster_less_memory_normed(all_cells, all_cells.obs.loc[:,signal])
mX = pd.DataFrame(mX, index=mX_index, columns=all_cells.var.index)
self.average_profiling(mX, mX_index, all_cells, all_cell_cluster_path+'_'+key, signal)
def average_for_each_cluster(adata, y_cluster, norm=True):
index = [x for x in y_cluster if x != 'Nan' and str(x) != 'nan']
index = sorted(list(set(index)))
matrix = None
final_index = []
for c in index:
row_ind = np.where(y_cluster == c)[0]
if norm:
ave_prof = adata[row_ind,].X.sum(axis=0)
else:
ave_prof = adata[row_ind,].X.sum(axis=0)/row_ind.shape[0]
if len(ave_prof.shape) == 0:
continue
if matrix is None: matrix = ave_prof
else: matrix = np.vstack((matrix, ave_prof))
final_index.append(c)
return np.squeeze(matrix), final_index
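# Quick illustrative check for average_for_each_cluster (hypothetical dense data;
# note that norm=True returns per-cluster sums while norm=False returns means):
#   toy = sc.AnnData(np.array([[1., 0.], [3., 0.], [0., 2.]]))
#   mat, labels = average_for_each_cluster(toy, np.array(['a', 'a', 'b']), norm=False)
#   # labels == ['a', 'b']; mat[0] == [2., 0.] and mat[1] == [0., 2.]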
def run_average_profiling(adata,
markers='',
output_header='output',
cluster=['cluster'],
top_cells=500,
reference=None,
verbose=True):
if markers == '':
markers = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../marker_genes/others/marker_name_list.csv")
if str(adata.obs.index[0]).isdigit(): # convert cell labels
adata.obs_names = ['cell_'+str(x) for x in adata.obs_names]
if str(adata.var.index[0]).isdigit(): # convert bin labels
adata.var_names = ['bin_'+str(x) for x in adata.var_names]
if len(cluster) > 0:
for label in cluster:
if verbose: print('Compute average profiles', label)
if label not in adata.obs.columns: continue
mX, mX_index = average_for_each_cluster(adata, adata.obs.loc[:,label])
mX = pd.DataFrame(mX, index=mX_index, columns=adata.var.index)
#%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
raw_data = {'officer_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'jan_arrests': [4, 24, 31, 2, 3],
'feb_arrests': [25, 94, 57, 62, 70],
'march_arrests': [5, 43, 23, 23, 51]}
df = pd.DataFrame(raw_data, columns = ['officer_name', 'jan_arrests', 'feb_arrests', 'march_arrests'])
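# A plausible continuation (illustrative): plot arrests per officer across the three months.
# df.set_index('officer_name')[['jan_arrests', 'feb_arrests', 'march_arrests']].plot(kind='bar')
# plt.show()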
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
# XXX: skipping min/max for this test since the behavior of Pandas
# is inconsistent: it assigns NaN to last output instead of 4!
if func_name not in ('min', 'max'):
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
sizes = (1, 2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_variable_apply1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable_apply2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# TODO: this crashes on Travis (3 process config) with size 1
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
from python_back_end.utilities.custom_multiprocessing import DebuggablePool
import numpy as np
import pandas as pd
from python_back_end.triangle_formatting.date_sorter import DateSorter
from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier
from python_back_end.data_cleaning.type_col_extracter import TypeColExtracter
from python_back_end.definitions import SheetTypeDefinitions
from python_back_end.exceptions import NonpermissibleDateColumnDetected
from python_back_end.program_settings import PROGRAM_STRINGS as ps, PROGRAM_PARAMETERS as pp
from python_back_end.utilities.help_functions import strict_index, sum_unique
from python_back_end.utilities.state_handling import DataHolder, DataStruct
from functools import partial
class TriangleFromTableBuilder:
@staticmethod
def build_triangle_from_table(dh):
new_dh = DataHolder(dh.name)
pool = DebuggablePool(pp.N_CORES)
# First find all date cols and see if one of them has target structure.
for dh_ind, ds in enumerate(dh.data_struct_list):
id_col, hori_date_col, vert_date_col = TriangleFromTableBuilder.do_the_magic(ds, pool)
# cut each id into one row
cut_list = TriangleFromTableBuilder.make_cut_list(ds.df_data[id_col])
# use the cut_list to insert all elements
tr_cols = pd.Series(ds.df_profiles.iloc[0, :] == SheetTypeDefinitions.TRIANGLE_ELEMENT, index=ds.df_profiles.columns)
pad_header_mapping = TriangleFromTableBuilder.make_pad_header_mapping(ds, hori_date_col)
vert_col_tup = (vert_date_col, ds.df_data[vert_date_col])
hori_col_tup = (hori_date_col, ds.df_data[hori_date_col])
id_col_tup = (id_col, ds.df_data[id_col])
func = partial(TriangleFromTableBuilder.apply_cuts, cut_list, vert_col_tup, hori_col_tup, id_col_tup, pad_header_mapping)
tr_col_tup_list = [(col_name, ds.df_data[col_name]) for col_name in tr_cols.index[tr_cols]]
out = pool.map(func, tr_col_tup_list)
#for name, tr_col in ds.df_data[tr_cols.index[tr_cols]].iteritems():
for temp_df_data, temp_df_profiles, name in out:
new_dh.add_sheet(name, temp_df_data, temp_df_profiles)
#new_dh.add_sheet(name, temp_df_data, temp_df_profiles)
pool.close()
return new_dh
@staticmethod
def make_pad_header_mapping(ds, hori_date_col):
start_pad = ds.df_data.columns[-1][:pp.N_DIGITS_HEADER_PADDING]
start_pad = int(start_pad)
temp = ds.df_data[hori_date_col].values
temp_headers = sorted(np.unique(temp))
pad_header_mapping = {head: str(ind).zfill(pp.N_DIGITS_HEADER_PADDING) + ". " + str(head)
for head, ind in zip(temp_headers, range(start_pad, len(temp_headers) + start_pad))}
return pad_header_mapping
@staticmethod
def apply_cuts(cut_list, vert_col_tup, hori_col_tup, id_col_tup, pad_header_mapping, tr_col_tup):
col_list = list()
for cut in cut_list:
# make unique column headers by summing
temp_headers = hori_col_tup[1][cut].values
temp_values = tr_col_tup[1][cut].values
temp_headers, temp_values = sum_unique(temp_headers, temp_values)
temp_headers = [pad_header_mapping[el] for el in temp_headers]
col_df = pd.Series(temp_values, index=temp_headers)
# add stuff to the series
temp_num = vert_col_tup[1][cut[0]]
temp_id = id_col_tup[1][cut[0]]
col_df.loc[vert_col_tup[0]] = temp_num
col_df.loc[id_col_tup[0]] = temp_id
col_list.append(col_df)
temp_df_data = pd.concat(col_list, axis=1, sort=True)
temp_df_data = temp_df_data.transpose()
temp_df_data = temp_df_data.fillna(0)
# get the year column for sorting
sorting_col = temp_df_data.loc[:, vert_col_tup[0]]
temp_df_data = DateSorter.append_and_sort(temp_df_data, sorting_col)
temp_df_profiles = pd.DataFrame(SheetTypeDefinitions.TRIANGLE_ELEMENT, columns=temp_df_data.columns,
index=temp_df_data.index)
temp_df_profiles.loc[:, vert_col_tup[0]] = SheetTypeDefinitions.STRING_DATE
temp_df_profiles.loc[:, id_col_tup[0]] = SheetTypeDefinitions.ID_ELEMENT
#temp_ds = DataStruct()
return temp_df_data, temp_df_profiles, tr_col_tup[0]
@staticmethod
def make_cut_list(id_col):
cut_list = []
uniques = id_col.unique()
index_form = pd.Index(id_col)
for id in uniques:
idxs = id_col.index[index_form.get_loc(id)]
if isinstance(idxs, pd.Index):
idxs = idxs.tolist()
else:
idxs = [idxs]
cut_list.append(idxs)
return cut_list
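# Illustrative behaviour (hypothetical series; exact handling of duplicate ids
# relies on pd.Index.get_loc and may differ across pandas versions):
#   TriangleFromTableBuilder.make_cut_list(pd.Series(['a', 'b', 'a']))
#   # -> [[0, 2], [1]]: one list of row positions per unique id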
@staticmethod
def do_the_magic(ds, pool):
date_cols = DateColIdentifier.identify_marked_date_cols(ds)
id_cols = pd.Series(ds.df_profiles.iloc[0, :] == SheetTypeDefinitions.ID_ELEMENT, index=ds.df_profiles.columns)
# -*- coding: utf-8 -*-
import os
import pandas as pd
from multiprocessing import Pool
import warnings
import numpy as np
import scipy
import pybacktest as pb
import matplotlib.pyplot as plt
import threading
import multiprocessing
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.signal
from random import choice
from time import sleep
from time import time
import sys
from trader_gym import environment
from A3C_class import *
from configs import TRAIN_DATA, LOAD_MODEL, LR, FRAMES_STACKED, NUM_WORKERS, MODEL_DIR
warnings.filterwarnings("ignore")
train_df = pd.read_pickle(TRAIN_DATA)
import os
import numpy as np
import pandas as pd
import time
def write_analysis(path, dataset_dict, datasettype, mask_part, start_time, supervised=True):
"""
Creates a text file which contains a short summary of the dataset_dict data
Parameters:
-----------
path: string
path where to save the dataframe
dataset_dict: dict
contains all the analysis data
datasettype: string
adds the name of the subset to the dataframe title
e.g. 'all', 'train', 'valid', 'test'
mask_part: list
contains the segmentation tasks
e.g. ['glomerulus', 'podocytes'], ['glomerulus'], ['podocytes']
start_time:
time at the start of the script. Used to calculate the duration of the analysis
    supervised: bool
        (optional) if True, dice coefficients on pixels and objects and the
        Pearson correlation against the groundtruth are computed and written;
        defaults to True
Returns:
--------
nothing
"""
for mask_el in mask_part:
if mask_el == 'podocytes':
filename = datasettype + '_podos.txt'
filestr = 'podos images'
elif mask_el == 'glomerulus':
filename = datasettype + '_gloms.txt'
filestr = 'gloms images'
else:
filename = datasettype + 'unknown.txt'
filestr = 'unknown type'
write_txt = open(str(os.path.join(path, filename)), "w")
if supervised:
dc_mean = np.sum(np.array(dataset_dict['dice_coeffs_%s' % mask_el])) / len(dataset_dict['dice_coeffs_%s'
% mask_el])
dc_min = np.min(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))
dc_max = np.max(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))
object_dc_mean = np.sum(np.array(dataset_dict['object_dc_%s' % mask_el])) / len(dataset_dict['object_dc_%s'
% mask_el])
object_dc_min = np.min(np.array(dataset_dict['object_dc_%s' % mask_el]))
object_dc_max = np.max(np.array(dataset_dict['object_dc_%s' % mask_el]))
pearson = calculate_pearson(dataset_dict['count_masks_%s' % mask_el], dataset_dict['count_preds_%s'
% mask_el])
write_txt.write(str("Mean dice coefficient on pixels of " + filestr + " compared to groundtruth: ") +
str(dc_mean) + '\n')
write_txt.write(str("Min dice coefficient on pixels of " + filestr + " compared to groundtruth: ") +
str(dc_min) + '\n')
write_txt.write(str("Max dice coefficient on pixels of " + filestr + " compared to groundtruth: ") +
str(dc_max) + '\n')
write_txt.write(str("Pearson correlation coefficient on objects of " + filestr +
" compared to groundtruth: ") + str(pearson) + '\n')
write_txt.write(str("Mean dice coeff on objects of " + filestr + " compared to groundtruth: ") +
str(object_dc_mean) + '\n')
write_txt.write(str("Min dice coeff on objects of " + filestr + " compared to groundtruth: ") +
str(object_dc_min) + '\n')
write_txt.write(str("Max dice coeff on objects of " + filestr + " compared to groundtruth: ") +
str(object_dc_max) + '\n')
write_txt.write('\n')
duration = time.time() - start_time
duration_std = int(duration / 3600)
duration_min = int((duration % 3600) / 60)
duration_sec = int(duration % 60)
write_txt.write(str("Test time: ") + str(duration_std) + "h " + str(duration_min)
+ "min " + str(duration_sec) + 'sec \n')
write_txt.close()
return
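# Hedged usage sketch (added, not part of the original source): it shows the
# minimal dataset_dict layout that write_analysis() expects for a single
# 'podocytes' task. The key names mirror the lookups above; the values, the
# output_dir default and the function name are hypothetical. calculate_pearson
# is assumed to be defined elsewhere in this module, since write_analysis()
# itself relies on it.
def _example_write_analysis_usage(output_dir="analysis_out"):
    os.makedirs(output_dir, exist_ok=True)
    example_dict = {
        'dice_coeffs_podocytes': [0.91, 0.88, 0.93],   # per-image pixel dice (illustrative)
        'object_dc_podocytes': [0.85, 0.80, 0.90],     # per-image object dice (illustrative)
        'count_masks_podocytes': [120, 95, 110],       # groundtruth object counts (illustrative)
        'count_preds_podocytes': [118, 97, 108],       # predicted object counts (illustrative)
    }
    # Writes analysis_out/test_podos.txt with mean/min/max dice and Pearson correlation.
    write_analysis(output_dir, example_dict, 'test', ['podocytes'],
                   start_time=time.time(), supervised=True)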
def write_dataframe(path, dataset_dict, image_list, datasettype, mask_part):
"""
Creates a pandas dataframe containing the analysis of mask and prediction
Parameters:
-----------
path: string
path where to save the dataframe
dataset_dict: dict
contains all the analysis data
image_list: list
contains all the image names
datasettype: string
adds the name of the subset to the dataframe title
e.g. 'all', 'train', 'valid', 'test'
mask_part: list
contains the segmentation tasks
e.g. ['glomerulus', 'podocytes'], ['glomerulus'], ['podocytes']
Returns:
--------
nothing
"""
for mask_el in mask_part:
titles = []
for i in range(len(image_list)):
# Get rid of .tif and the path before
image_name = os.path.split(image_list[i])[1]
titles.append(image_name[:-4])
df = pd.DataFrame({'Sample name': pd.Series(titles),
'GT count': pd.Series(dataset_dict['count_masks_%s' % mask_el]),
'Network count': pd.Series(dataset_dict['count_preds_%s' % mask_el]),
'GT area': pd.Series(dataset_dict['area_masks_%s' % mask_el]),
'Network area': pd.Series(dataset_dict['area_preds_%s' % mask_el]),
'Network dice pixel': pd.Series(dataset_dict['dice_coeffs_%s' % mask_el]),
'Network dice object': pd.Series(dataset_dict['object_dc_%s' % mask_el]),
'Network True pos': pd.Series(dataset_dict['tp_%s' % mask_el]),
'Network False pos': pd.Series(dataset_dict['fp_%s' % mask_el]),
'Network False neg': pd.Series(dataset_dict['fn_%s' % mask_el])})
df.to_excel(str(os.path.join(path, datasettype + '_Dataframe_' + mask_el + '.xlsx')))
# df.to_csv(path + datasettype + '_Dataframe_' + mask_el + '.csv')
return
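# Hedged usage sketch (added, not part of the original source): write_dataframe()
# expects one entry per image in every per-task list. The image paths, values,
# output_dir default and function name below are hypothetical. Note that
# DataFrame.to_excel needs an Excel engine such as openpyxl to be installed.
def _example_write_dataframe_usage(output_dir="analysis_out"):
    os.makedirs(output_dir, exist_ok=True)
    images = ["/data/img_001.tif", "/data/img_002.tif"]   # hypothetical image paths
    example_dict = {
        'count_masks_glomerulus': [1, 1],
        'count_preds_glomerulus': [1, 2],
        'area_masks_glomerulus': [5400, 6100],
        'area_preds_glomerulus': [5350, 6300],
        'dice_coeffs_glomerulus': [0.97, 0.95],
        'object_dc_glomerulus': [1.0, 0.67],
        'tp_glomerulus': [1, 1],
        'fp_glomerulus': [0, 1],
        'fn_glomerulus': [0, 0],
    }
    # Writes analysis_out/test_Dataframe_glomerulus.xlsx with one row per image.
    write_dataframe(output_dir, example_dict, images, 'test', ['glomerulus'])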
def write_readouts(path, dataset_dict, image_list, datasettype, mask_part,
do_wt1_signal, do_dach1_signal, do_stereology_pred, do_stereology_gt):
"""
    Creates the csv output that is used for the classification.
    The dataframe optionally contains the WT1 signal of the glomerulus prediction,
    the DACH1 signal of the podocyte prediction and
the stereological calculations.
"""
titles = []
for i in range(len(image_list)):
image_name = os.path.split(image_list[i])[1]
titles.append(image_name[:-4])
# Segmentation of only 1 class was applied (e.g. glomerulus or podocytes)
if len(mask_part) == 1:
mask_el = mask_part.pop()
if mask_el == "glomerulus":
network_area = "glomerulus_area"
# Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True
if do_wt1_signal:
df = pd.DataFrame(
{'image_name': pd.Series(titles),
network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),
'mean_WT1_signal_in_glom': pd.Series(dataset_dict['mean_WT1_glom_preds']),
'var_WT1_signal_in_glom': pd.Series(dataset_dict['var_WT1_glom_preds']),
'median_WT1_signal_in_glom': pd.Series(dataset_dict['median_WT1_glom_preds']),
'min_WT1_signal_in_glom': pd.Series(dataset_dict['min_WT1_glom_preds']),
'max_WT1_signal_in_glom': pd.Series(dataset_dict['max_WT1_glom_preds']),
'perc25_WT1_signal_in_glom': pd.Series(dataset_dict['perc25_WT1_glom_preds']),
'perc75_WT1_signal_in_glom': pd.Series(dataset_dict['perc75_WT1_glom_preds'])})
else:
df = pd.DataFrame({'image_name': pd.Series(titles),
network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})
elif mask_el == "podocytes":
network_count = "podocyte_count"
network_area = "podocyte_nuclear_area"
# Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True
if do_dach1_signal:
df = pd.DataFrame({'image_name': pd.Series(titles),
network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),
network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),
'mean_DACH1_signal_in_podo': pd.Series(dataset_dict['mean_DACH1_podo_preds']),
                               'var_DACH1_signal_in_podo': pd.Series(dataset_dict['var_DACH1_podo_preds'])
from typing import Dict
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
frequencies = ["D", "15min"]
DistributionDict = Dict[str, pd.DataFrame]
@pytest.fixture(params=frequencies, ids=frequencies)
def date_range(request) -> pd.DatetimeIndex:
"""Create pd.Series with range of dates."""
freq = request.param
dtr = pd.date_range(start="2020-01-01", end="2020-03-01", freq=freq)
return dtr
@pytest.fixture
def all_date_present_df(date_range: pd.DatetimeIndex) -> pd.DataFrame:
"""Create pd.DataFrame that contains some target on given range of dates without gaps."""
df = pd.DataFrame({"timestamp": date_range})
df["target"] = [i for i in range(len(df))]
df.set_index("timestamp", inplace=True)
return df
@pytest.fixture
def all_date_present_df_two_segments(all_date_present_df: pd.DataFrame) -> pd.DataFrame:
"""Create pd.DataFrame that contains two segments with some targets on given range of dates without gaps."""
df_1 = all_date_present_df.reset_index()
df_2 = all_date_present_df.copy().reset_index()
df_1["segment"] = "segment_1"
df_2["segment"] = "segment_2"
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
return df
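# Note (added for clarity, not in the original source): TSDataset.to_dataset
# pivots the long-format frame into etna's wide format -- the index is the
# timestamp and the columns form a (segment, feature) MultiIndex, e.g.
# ("segment_1", "target") and ("segment_2", "target") for the fixture above.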
@pytest.fixture
def df_with_missing_value_x_index(random_seed, all_date_present_df: pd.DataFrame) -> Tuple[pd.DataFrame, int]:
"""Create pd.DataFrame that contains some target on given range of dates with one gap."""
# index cannot be first or last value,
# because Imputer should know starting and ending dates
timestamps = sorted(all_date_present_df.index)[1:-1]
idx = np.random.choice(timestamps)
df = all_date_present_df
df.loc[idx, "target"] = np.NaN
return df, idx
@pytest.fixture
def df_with_missing_range_x_index(all_date_present_df: pd.DataFrame) -> Tuple[pd.DataFrame, list]:
"""Create pd.DataFrame that contains some target on given range of dates with range of gaps."""
timestamps = sorted(all_date_present_df.index)
rng = timestamps[2:7]
df = all_date_present_df
df.loc[rng, "target"] = np.NaN
return df, rng
@pytest.fixture
def df_with_missing_range_x_index_two_segments(
df_with_missing_range_x_index: pd.DataFrame,
) -> Tuple[pd.DataFrame, list]:
"""Create pd.DataFrame that contains some target on given range of dates with range of gaps."""
df_one_segment, rng = df_with_missing_range_x_index
df_1 = df_one_segment.reset_index()
df_2 = df_one_segment.copy().reset_index()
df_1["segment"] = "segment_1"
df_2["segment"] = "segment_2"
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
return df, rng
@pytest.fixture
def df_all_missing(all_date_present_df: pd.DataFrame) -> pd.DataFrame:
"""Create pd.DataFrame with all values set to nan."""
all_date_present_df.loc[:, :] = np.NaN
return all_date_present_df
@pytest.fixture
def df_all_missing_two_segments(all_date_present_df_two_segments: pd.DataFrame) -> pd.DataFrame:
"""Create pd.DataFrame with all values set to nan."""
all_date_present_df_two_segments.loc[:, :] = np.NaN
return all_date_present_df_two_segments
@pytest.fixture
def daily_exog_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": | pd.date_range(start="2020-01-05", freq="H", periods=48) | pandas.date_range |