prompt (string, length 19–1.03M) | completion (string, length 4–2.12k) | api (string, length 8–90)
---|---|---
import re
import socket
from datetime import datetime
from urlextract import URLExtract
import urllib.parse as urlparse
from urllib.parse import parse_qs
import click
import argparse
import csv
import os
from dateutil.parser import parse
import pandas as pd
from urllib.parse import unquote
import hashlib
# When you need to connect to a database
#from pandas.io import sql
#import mysql.connector
#from sqlalchemy import create_engine
#import mysql.connector
#Global Variables
data = []
map_refer = {}
# Missing an ArgumentParser
#parser = argparse.ArgumentParser(description='Description of your program')
#parser.add_argument('-p','--path', help='Location of the patching files', default="./Files/")
#args = vars(parser.parse_args())
def extract(request):
"""
Extract the archived URL from a wayback request (the caller reduces it to its domain).
"""
extractor = URLExtract()
try:
urls = extractor.find_urls('/'.join(request.split('/')[3:]))
if urls:
return urls[0]
else:
return None
except Exception:
# Debugging hook left in place for unexpected request formats.
import pdb; pdb.set_trace()
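# Illustrative sketch (not part of the original pipeline): how extract() and
# urllib.parse recover the archived page from a wayback request. The sample
# request string and the helper name below are hypothetical.
def _example_extract():
    sample = "GET /wayback/20190101000000/http://example.com/page HTTP/1.1"
    url = extract(sample)                    # expected: 'http://example.com/page'
    domain = urlparse.urlparse(url).netloc   # expected: 'example.com'
    return url, domain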
def getParametersFromRequestWayback(request, df, i):
"""
Extract parameters from wayback request.
"""
# Just a sanity check.
if not pd.isnull(df.at[i, 'DATE']):
try:
# Generate timestamp using the parameter DATE
date_simple = df.at[i, 'DATE'].replace("[", "").replace("]", "")
date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
# Just a sanity check.
if re.match(r"GET /wayback/[0-9]+", request):
#Extract url domain
url = extract(request)
if urlparse.urlparse(url).netloc != "":
final_url = urlparse.urlparse(url).netloc
else:
final_url = url
#Put into a list to later generate a dataframe
data.append([df.at[i, "IP_ADDRESS"], df.at[i, "USER_AGENT"], date.timestamp(), df.at[i, "REQUEST"], df.at[i, "STATUS_CODE"], df.at[i, "PREVIOUS_REQUEST"], final_url])
except Exception as e:
raise ValueError("Error - getParametersFromRequestWayback function") from e
def getParametersFromRequest(request, df, i, boolRequest):
"""
Extract and process the parameters from query request.
Function only used for Apache logs.
"""
# Check whether we are processing the request or the previous_request
if boolRequest:
#This request is not used in the first analysis, but it is processed here for later analyses.
#Image Search JSP and Page Search JSP are treated the same way.
if request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
# Set the parameter BOOL_QUERY (i.e., =1 means the line is a query)
df.at[i, 'BOOL_QUERY'] = 1
# Set the parameter TYPE_SEARCH
if request.startswith("GET /search.jsp?"):
df.at[i, 'TYPE_SEARCH'] = "search_jsp"
else:
df.at[i, 'TYPE_SEARCH'] = "images_jsp"
# Parse the REQUEST and Set the parameters TRACKINGID, USER_TRACKING_ID, SEARCH_TRACKING_ID, QUERY, LANG_REQUEST, FROM_REQUEST, TO_REQUEST
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
try:
df.at[i, 'QUERY'] = unquote(parse_qs(parsed.query)['query'][0])
df.at[i, 'LANG_REQUEST'] = parse_qs(parsed.query)['l'][0]
except:
df.at[i, 'BOT'] = 1
try:
df.at[i, 'FROM_REQUEST'] = parse_qs(parsed.query)['dateStart'][0]
df.at[i, 'TO_REQUEST'] = parse_qs(parsed.query)['dateEnd'][0]
except:
df.at[i, 'FROM_REQUEST'] = None
df.at[i, 'TO_REQUEST'] = None
#Image Search API and Page Search API calls are treated the same way.
elif "textsearch?" in request or "imagesearch?" in request:
# Set the parameter BOOL_QUERY (i.e., =1 means the line is a query)
df.at[i, 'BOOL_QUERY'] = 1
# Set the parameter TYPE_SEARCH
if request.startswith("GET /imagesearch?"):
df.at[i, 'TYPE_SEARCH'] = "imagesearch"
else:
df.at[i, 'TYPE_SEARCH'] = "textsearch"
# Parse the REQUEST and Set the parameters TRACKINGID, USER_TRACKING_ID, SEARCH_TRACKING_ID, QUERY, MAXITEMS, PAGE, FROM_REQUEST, TO_REQUEST
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
try:
#import pdb;pdb.set_trace()
df.at[i, 'QUERY'] = unquote(parse_qs(parsed.query)['q'][0])
offset = int(parse_qs(parsed.query)['offset'][0])
df.at[i, 'MAXITEMS'] = int(parse_qs(parsed.query)['maxItems'][0])
df.at[i, 'PAGE'] = int(offset/df.at[i, 'MAXITEMS'])
except:
df.at[i, 'BOT'] = 1
try:
df.at[i, 'FROM_REQUEST'] = parse_qs(parsed.query)['from'][0]
df.at[i, 'TO_REQUEST'] = parse_qs(parsed.query)['to'][0]
except:
df.at[i, 'FROM_REQUEST'] = None
df.at[i, 'TO_REQUEST'] = None
#Process the parameter REQUEST and set the parameter PREVIOUS_REQUEST
else:
if request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
parsed = urlparse.urlparse(request)
df.at[i, 'PREVIOUS_QUERY'] = parse_qs(parsed.query)['query'][0]
elif request.startswith("GET /imagesearch?") or request.startswith("GET /textsearch?"):
parsed = urlparse.urlparse(request)
df.at[i, 'PREVIOUS_QUERY'] = parse_qs(parsed.query)['q'][0]
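# Illustrative sketch (hypothetical request string, not taken from the real
# logs): how urlparse + parse_qs split a search.jsp request into the fields
# stored above (TRACKINGID, USER/SEARCH tracking ids, QUERY, language).
def _example_parse_search_request():
    sample = "GET /search.jsp?query=lisboa&l=pt&trackingId=user123_456"
    params = parse_qs(urlparse.urlparse(sample).query)
    tracking_id = params['trackingId'][0]        # 'user123_456'
    user_id, search_id = tracking_id.split("_")  # 'user123', '456'
    query = unquote(params['query'][0])          # 'lisboa'
    lang = params['l'][0]                        # 'pt'
    return user_id, search_id, query, lang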
def processDataframe(request, previous_request, file_name, df, i, all_info_date):
"""
Function to process each log depending on the format (Apache vs Log4j)
"""
# Check if we are processing the Apache Log
if "logfile" in file_name:
getParametersFromRequest(request.replace(" HTTP/1.1", ""), df, i, True)
if not pd.isnull(previous_request):
getParametersFromRequest(previous_request.replace(" HTTP/1.1", ""), df, i, False)
# if we are not processing the Apache Log
else:
#The tracking ID is the only thing needed from the request
parsed = urlparse.urlparse(request)
try:
df.at[i, 'TRACKINGID'] = parse_qs(parsed.query)['trackingId'][0]
df.at[i, 'USER_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[0]
df.at[i, 'SEARCH_TRACKING_ID'] = parse_qs(parsed.query)['trackingId'][0].split("_")[1]
except:
df.at[i, 'TRACKINGID'] = ""
# Just a sanity check.
if not pd.isnull(df.at[i, 'DATE']):
try:
# Generate TIMESTAMP using the parameter DATE and Set the parameters YEAR, MONTH, DAY, HOUR, MINUTE
date_simple = df.at[i, 'DATE'].replace("[", "").replace("]", "")
date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
df.at[i, 'TIMESTAMP'] = date.timestamp()
if all_info_date:
df.at[i, 'YEAR'] = date.year
df.at[i, 'MONTH'] = date.month
df.at[i, 'DAY'] = date.day
df.at[i, 'HOUR'] = date.hour
df.at[i, 'MINUTE'] = date.minute
except:
df.at[i, 'BOT'] = 1
else:
df.at[i, 'BOT'] = 1
return date
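# Illustrative sketch: the Apache DATE field arrives as "[01/Feb/2021:13:45:10"
# (the timezone sits in the separate ZONE column), which is why the brackets
# are stripped before strptime above. The sample value is an assumption.
def _example_parse_log_date():
    date_simple = "[01/Feb/2021:13:45:10".replace("[", "").replace("]", "")
    date = datetime.strptime(date_simple, "%d/%b/%Y:%H:%M:%S")
    return date.year, date.month, date.day, date.hour   # (2021, 2, 1, 13)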
def mergeFiles():
"""
Function that will process each log and merge them (The core of this file).
"""
click.secho("Start Process...", fg='green')
#Location/path of the logs.
mypath = "./data/"
#Create Dataframes for each (Apache Log, Image Search API Log4j, Page Search API Log4j, Webapp API Log4j).
df_merge_apache_file = None
df_merge_image_file = None
df_merge_page_file = None
df_merge_arquivo_webapp_file = None
# Just to initialize variables that we are going to use (can be removed).
df_log = None
df_image = None
df_page = None
df_arquivo = None
## For each log file:
for subdir, dirs, files in os.walk(mypath):
#If list is not empty.
if files:
## Progress bar with the number of log files.
with click.progressbar(length=len(files), show_pos=True) as progress_bar_total:
for file in files:
progress_bar_total.update(1)
#Get Filename
file_name = os.path.join(subdir, file)
# Process Apache Logs
if file_name.startswith("./data/logs/arquivo.pt_apache/logfile"):
#Read file into Dataframe
names_apache = ["IP_ADDRESS", "CLIENT_ID", "USER_ID", "DATE", "ZONE", "REQUEST", "STATUS_CODE", "SIZE_RESPONSE", "PREVIOUS_REQUEST", "USER_AGENT", "RESPONSE_TIME"]
df_log = pd.read_csv(file_name, sep=r'\s+', names=names_apache)
#Init new columns
df_log["UNIQUE_USER"] = ""
df_log["SPELLCHECKED"] = 0
df_log["REFER"] = ""
#Tracking
df_log["TRACKINGID"] = ""
df_log["USER_TRACKING_ID"] = ""
df_log["SEARCH_TRACKING_ID"] = ""
#Date
df_log["TIMESTAMP"] = 0
df_log["YEAR"] = 0
df_log["MONTH"] = 0
df_log["DAY"] = 0
df_log["HOUR"] = 0
df_log["MINUTE"] = 0
#Search and Query
df_log["TYPE_SEARCH"] = ""
df_log["QUERY"] = ""
df_log["LANG_REQUEST"] = ""
df_log["FROM_REQUEST"] = ""
df_log["TO_REQUEST"] = ""
df_log["PREVIOUS_QUERY"] = ""
df_log["MAXITEMS"] = 0
df_log["PAGE"] = 0
#Query from robots or internal requests (default is 0, "Not a Bot")
df_log["BOT"] = 0
## Progress Bar of the number of lines processed (Apache Log File).
with click.progressbar(length=df_log.shape[0], show_pos=True) as progress_bar:
for i in df_log.index:
progress_bar.update(1)
#Get Request
request = df_log.at[i, 'REQUEST']
#Get Previous Request
previous_request = df_log.at[i, 'PREVIOUS_REQUEST']
#Problem with some requests (non-string values)
if isinstance(request, str) and isinstance(previous_request, str):
#We will create different files (Query Log file and Wayback Log file)
# Check if the request is not from wayback
if "wayback" not in request:
# Only process requests from textsearch, imagesearch, search.jsp, and images.jsp.
if request.startswith("GET /textsearch?") or request.startswith("GET /imagesearch?") or request.startswith("GET /search.jsp?") or request.startswith("GET /images.jsp?"):
processDataframe(request, previous_request, file_name, df_log, i, True)
#Generate a unique identifier for each user, making it an anonymized user.
string_user = str(df_log.at[i, 'IP_ADDRESS']) + str(df_log.at[i, 'USER_AGENT'])
df_log.at[i, 'UNIQUE_USER'] = int(hashlib.sha1(string_user.encode("utf-8")).hexdigest(), 16) % (10 ** 8)
#Check if the entry was generated because the user clicked on the query suggestion.
if "spellchecked=true" in previous_request:
df_log.at[i, 'SPELLCHECKED'] = 1
#Build a dictionary with the referrers
if "arquivo.pt" not in previous_request:
df_log.at[i, 'REFER'] = previous_request
if previous_request not in map_refer:
map_refer[previous_request] = 1
else:
map_refer[previous_request] += 1
else:
#This condition removes lines such as "GET /js/jquery-1.3.2.min.js HTTP/1.1"
df_log.at[i, 'BOT'] = 1
else:
"""
Process the wayback requests
"""
#Set the entrie as "Bot" to not appear in the queries dataset.
df_log.at[i, 'BOT'] = 1
getParametersFromRequestWayback(request, df_log, i)
else:
df_log.at[i, 'BOT'] = 1
#Remove entries from "BOTs"
df_log = df_log[df_log['BOT']==0]
#Concatenate the file with previous files
df_log = df_log[['IP_ADDRESS', 'STATUS_CODE', 'REQUEST', 'USER_AGENT', 'TRACKINGID', 'USER_TRACKING_ID', 'SEARCH_TRACKING_ID', 'TIMESTAMP', 'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'TYPE_SEARCH', 'QUERY', 'PAGE', 'MAXITEMS', 'LANG_REQUEST', 'FROM_REQUEST', 'TO_REQUEST', 'REFER', 'SPELLCHECKED', 'UNIQUE_USER']]
frames = [df_merge_apache_file, df_log]
df_merge_apache_file = pd.concat(frames)
## Logs Image Search API
if file_name.startswith("./data/logs/arquivo.pt_image_search/imagesearch"):
#Read file into DataFrame
names_image_search = ["DATE", "LOG_TYPE", "APPLICATION", "-", "IP_ADDRESS", "USER_AGENT", "URL_REQUEST", "IMAGE_SEARCH_RESPONSE(ms)", "IMAGE_SEARCH_PARAMETERS", "IMAGE_SEARCH_RESULTS"]
df_image = pd.read_csv(file_name, sep='\t', error_bad_lines=False, names=names_image_search)
#Init new columns
df_image["TRACKINGID"] = ""
df_image["BOT"] = 0
df_image["TIMESTAMP"] = 0
## Progress Bar of the number of lines processed (Image Search API Log4j).
with click.progressbar(length=df_image.shape[0], show_pos=True) as progress_bar:
for i in df_image.index:
progress_bar.update(1)
# Just a sanity check.
if not pd.isnull(df_image.at[i, 'IP_ADDRESS']):
request = df_image.at[i, 'URL_REQUEST']
# Just a sanity check.
if not | pd.isnull(request) | pandas.isnull |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
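# Minimal usage sketch (hypothetical data): the first tuple becomes the header
# row and the remaining tuples become the records.
# >>> create_dataframe([('a', 'b'), (1, 2), (3, 4)]).columns.tolist()
# ['a', 'b']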
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-06-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-07-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-08-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-09-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-10-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-11-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-12-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_feb():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
( | Timestamp('2012-08-01 00:00:00') | pandas.Timestamp |
# pip install bs4 lxml
import time
import re
import json
import os
from bs4 import BeautifulSoup
import pandas as pd
import functions as func
from settings import Settings as st
class Songs:
def __init__(self, keyword, limit):
# Initial playlist state
self.only_lyric = []
self.plist = None
self.keyword = keyword
self.limit = limit
def get_plist_songs(self, url):
if self.plist is None:
self.plist = | pd.DataFrame(columns=['id','name','url']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing is defined and the values should
# remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
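# Illustrative note (hypothetical data, not part of the original suite):
# str.match anchors the pattern at the start of each string and returns a
# boolean Series, propagating NaN unless `na=` is supplied.
# >>> Series(['fooBAD', 'foo']).str.match('.*BAD').tolist()
# [True, False]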
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single series name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
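# Illustrative note (hypothetical data, not part of the original suite):
# extractall returns one row per match with an extra index level named
# "match", whereas extract keeps only the first match per subject.
# >>> Series(['a1a2']).str.extractall(r'([a-z])(\d)').shape
# (2, 2)
# >>> Series(['a1a2']).str.extract(r'([a-z])(\d)', expand=True).shape
# (1, 2)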
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
        e = DataFrame(columns=[0, "second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # Index should return the same result as the default index without a name,
        # i.e. index.name does not affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ FULLWIDTH DIGIT THREE
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([ | u(' a ') | pandas.compat.u |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 09:05:39 2015
@author: efouche
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from ibmdbpy.internals import idadf_state
from ibmdbpy.utils import timed
from collections import OrderedDict
import pandas as pd
import six
@idadf_state
def entropy(idadf, target=None, mode="normal", execute=True, ignore_indexer=True):
"""
Compute the entropy for a set of features in an IdaDataFrame.
Parameters
----------
idadf: IdaDataFrame
target: str or list of str, optional
        A column or list of columns to be used as features. By default,
        all columns are considered.
mode: "normal" or "raw"
Experimental
execute: bool, default:True
        Experimental. Execute the request or return the corresponding SQL query.
    ignore_indexer: bool, default: True
        By default, the column declared as indexer in idadf is ignored.
Returns
-------
Pandas.Series
Notes
-----
Input column should be categorical, otherwise this measure does not make
much sense.
Examples
--------
>>> idadf = IdaDataFrame(idadb, "IRIS")
>>> entropy(idadf)
"""
if target is not None:
if isinstance(target, six.string_types):
target = [target]
targetstr = "\",\"".join(target)
subquery = "SELECT COUNT(*) AS a FROM %s GROUP BY \"%s\""%(idadf.name,targetstr)
if mode == "normal":
length = len(idadf)
query = "SELECT(SUM(-a*LOG(a))/%s+LOG(%s))/LOG(2)FROM(%s)"%(length, length, subquery)
elif mode == "raw":
query = "SELECT SUM(-a*LOG(a)) FROM(%s)"%(subquery)
if not execute:
query = query[:query.find("FROM")] + ",'%s'"%"\',\'".join(target) + query[query.find("FROM"):]
return query
return idadf.ida_scalar_query(query)
else:
entropy_dict = OrderedDict()
columns = list(idadf.columns)
# Remove indexer
if ignore_indexer:
if idadf.indexer:
if idadf.indexer in columns:
columns.remove(idadf.indexer)
for column in columns:
entropy_dict[column] = entropy(idadf, column, mode = mode)
# Output
if len(columns) > 1:
result = | pd.Series(entropy_dict) | pandas.Series |
import pandas as pd
import numpy as np
#######################################################################################
# Return recommendations based on reviews
#######################################################################################
def find_reviews(query,reviews, n_results=5):
# Create vector from query and compare with global embedding
sentence = [query]
sentence_vector = np.array(embed(sentence))
inner_product = np.inner(sentence_vector, sentence_array)[0]
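    # Note: `embed` and `sentence_array` are assumed to be defined elsewhere in the
    # notebook (a sentence-embedding function and the precomputed embedding matrix of
    # all reviews); each inner product scores the similarity of a review to the query.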
# Find sentences with highest inner products
top_n_sentences = | pd.Series(inner_product) | pandas.Series |
import os
data_path = os.path.abspath(os.path.join('other','aml','w1','datasets'))
### This cell imports the necessary modules and sets a few plotting parameters for display
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20.0, 10.0)
### GRADED
### Code a function called `calc_posterior`
### ACCEPT three inputs
### Two floats: the likelihood and the prior
### One list of tuples, where each tuple has two values corresponding to:
### ### ( P(Bn) , P(A|Bn) )
### ### ### Assume the list of tuples accounts for all potential values of B
### ### ### And that those values of B are all mutually exclusive.
### The list of tuples allows for the calculation of normalization constant.
### RETURN a float corresponding to the posterior probability
### YOUR ANSWER BELOW
def calc_posterior(likelihood, prior, norm_list):
"""
Calculate the posterior probability given likelihood,
prior, and normalization
Positional Arguments:
likelihood -- float, between 0 and 1
prior -- float, between 0 and 1
norm_list -- list of tuples, each tuple has two values
the first value corresponding to the probability of a value of "b"
the second value corresponding to the probability of
a value of "a" given that value of "b"
Example:
likelihood = .8
prior = .3
norm_list = [(.25 , .9), (.5, .5), (.25,.2)]
print(calc_posterior(likelihood, prior, norm_list))
# --> 0.45714285714285713
"""
Pa = 0
for t in norm_list:
x = t[0] * t[1]
Pa+=x
return (likelihood*prior)/Pa
likelihood = .8
prior = .3
norm_list = [(.25 , .9), (.5, .5), (.25,.2)]
print(calc_posterior(likelihood, prior, norm_list))
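# Worked check of the example above (not part of the exercise): the normalization
# constant is P(A) = .25*.9 + .5*.5 + .25*.2 = 0.225 + 0.25 + 0.05 = 0.525, so the
# posterior is (.8 * .3) / 0.525 ≈ 0.4571, matching the printed value.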
def euclid_dist(p1, p2):
"""
Calculate the Euclidian Distance between two points
Positional Arguments:
p1 -- A tuple of n numbers
p2 -- A tuple of n numbers
Example:
p1 = (5,5)
p2 = (0,0)
p3 = (5,6,7,8,9,10)
p4 = (1,2,3,4,5,6)
print(euclid_dist(p1,p2)) #--> 7.0710678118654755
print(euclid_dist(p3,p4)) #--> 9.797958971132712
"""
return float(np.linalg.norm(np.array(p1)-np.array(p2)))
p1 = (5,5)
p2 = (0,0)
p3 = (5,6,7,8,9,10)
p4 = (1,2,3,4,5,6)
print(euclid_dist(p1,p2))
print(euclid_dist(p3,p4))
### GRADED
### Build a function called "x_preprocess"
### ACCEPT one input, a numpy array
### ### Array may be one or two dimensions
### If input is two dimensional, make sure there are more rows than columns
### ### Then prepend a column of ones for intercept term
### If input is one-dimensional, prepend a one
### RETURN a numpy array, prepared as described above,
### which is now ready for matrix multiplication with regression weights
def x_preprocess(input_x):
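    # Orient 2-D input so that rows are observations (more rows than columns),
    # then prepend a column of ones (2-D) or a single leading one (1-D) for the intercept.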
if (input_x.ndim==2):
if (len(input_x) < input_x.shape[1]):
input_x = input_x.transpose()
if (input_x.ndim==2):
input_x = np.concatenate((np.ones((input_x.shape[0],1), dtype=int), input_x), axis=1)
if (input_x.ndim==1):
input_x = np.insert(input_x, 0, 1)
return np.array(input_x)
input1 = np.array([[2,3,6,9],[4,5,7,10]])
input2 = np.array([2,3,6])
input3 = np.array([[2,4],[3,5],[6,7],[9,10]])
for i in [input1, input2, input3]:
print(x_preprocess(i), "\n")
"""
# --> [[ 1. 2. 4.]
[ 1. 3. 5.]
[ 1. 6. 7.]
[ 1. 9. 10.]]
[1 2 3 6]
[[ 1. 2. 4.]
[ 1. 3. 5.]
[ 1. 6. 7.]
[ 1. 9. 10.]]
"""
def calculate_map_coefficients(aug_x, output_y, lambda_param, sigma):
X = aug_x
Y = output_y
output_df = | pd.DataFrame() | pandas.DataFrame |
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import Models
from Models.Models import away_features, home_features, features, build_DT_classifier, build_RF_classifier, build_XGBoostClassifier
import Helper
import Models.moving_average_dataset
import Models.backtesting as backtesting
import Models.elo_model as elo_model
import dicts_and_lists as dal
import logging, coloredlogs
pd.set_option('display.max_rows', 1000)
# ------------ Hyperparameters ------------ #
leave_out = '2020'
margin = 0
betting_limiter = True
betting_limit = 0.125
prob_threshold = 0.65
prob_2x_bet = 0.99
offset = 0.0 # Added probability
average_N = 3
skip_n = 0
# ------ Logger ------- #
logger = logging.getLogger('test_models.py')
coloredlogs.install(level='INFO', logger=logger)
def extract_and_predict(next_game):
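    """Predict the winner of next_game from the average stats of each team's last N games
    and store the prediction, true outcome, model probability and bookmaker odds."""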
# Extract away_team Name and home_team Name from last_N_games_away and last_N_games_home
away_team = next_game['Team_away'].values[0]
home_team = next_game['Team_home'].values[0]
# Before predicting a game, check that it has not yet been predicted.
    # This is the case where, e.g., TeamHome's next home game against TeamAway has been evaluated ...
    # ... by both the next home game and the next away game. They are the same game, which would therefore be predicted twice.
if next_game.index[0] not in evaluated_indexes:
# Track the inserted game based on its index
evaluated_indexes.append(next_game.index[0])
# Extract indexes for last N games
next_games_away_indexes = df.loc[df['Team_away'] == away_team].index
next_games_home_indexes = df.loc[df['Team_home'] == home_team].index
next_away_indexes_reduced = [x for x in next_games_away_indexes if x < next_game.index[0]][-average_N:]
next_home_indexes_reduced = [x for x in next_games_home_indexes if x < next_game.index[0]][-average_N:]
# Extract last N games based on indexes
last_N_games_away = df.iloc[next_away_indexes_reduced]
last_N_games_home = df.iloc[next_home_indexes_reduced]
# Concatenate the two teams with their average stats
to_predict = pd.concat(
[
last_N_games_away[away_features].mean(),
last_N_games_home[home_features].mean()
],
axis=0)[features]
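        # The concat above yields one Series indexed by the away-feature names followed by
        # the home-feature names; indexing with `features` reorders it to the column order
        # the classifier was trained on, before reshaping to (1, n_features) for the scaler.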
# Standardize the input
to_predict = scaler.transform(to_predict.values.reshape(1,-1))
pred = int(clf.predict(to_predict))
true_value = next_game['Winner'].values[0]
predictions.append(pred)
winners.append(true_value)
prob = clf.predict_proba(to_predict)
model_prob.append(max(prob[0]))
model_odds.append(1/max(prob[0]))
odds_away.append(next_game['OddsAway'].values[0])
odds_home.append(next_game['OddsHome'].values[0])
dates_list.append(next_game['Date'].values[0])
home_teams_list.append(home_team)
away_teams_list.append(away_team)
# Only the most significant features will be considered
away_features = away_features
home_features = home_features
# Create the df containing stats per single game on every row
train_df = pd.read_csv('past_data/average_seasons/average_N_4Seasons.csv')
# Standardize the DataFrame
std_df, scaler = Helper.standardize_DataFrame(train_df)
### Test the Classification model based on the mean of the last average_N games ###
logger.info('\nSelect the type of model you want to backtest:\n\
[1]: Decision Tree + Elo Model\n\
[2]: Random Forest + Elo Model\n\
[3]: Random Forest + Elo Model + Build Moving Average Dataset\n\
[4]: XGBoost + Elo Model'
)
inp = input()
if inp == '1':
logger.info('Building a Decision Tree Classifier...')
clf = build_DT_classifier(std_df)
elif inp == '2':
logger.info('Building a Random Forest Classifier...')
clf = build_RF_classifier(std_df)
elif inp == '3':
Models.moving_average_dataset.build_moving_average_dataset(average_N, skip_n, leave_out=leave_out)
train_df = pd.read_csv('past_data/average_seasons/average_N_4Seasons.csv')
# Standardize the DataFrame
std_df, scaler = Helper.standardize_DataFrame(train_df)
logger.info('Building a Random Forest Classifier...')
clf = build_RF_classifier(std_df)
elif inp == '4':
clf = build_XGBoostClassifier(std_df)
# To evaluate accuracy
dates_list = []
predictions = []
winners = []
model_prob = []
model_odds = []
odds_away = []
odds_home = []
home_teams_list = []
away_teams_list = []
evaluated_indexes = []
# Backtest on the 2020/2021 Season
df = pd.read_csv('past_data/2020_2021/split_stats_per_game.csv')
print(f'Stats averaged from {average_N} games, first {skip_n} games are skipped.')
for skip_n_games in range(skip_n, 50-average_N):
last_N_games_away, last_N_games_home = backtesting.get_first_N_games(df, average_N, skip_n_games)
# Get next game based on next_game_index
for team in dal.teams:
# Find all games where "team" plays away
next_games_away_indexes = df.loc[df['Team_away'] == team].index
last_away_game = last_N_games_away[dal.teams_to_int[team]][-1:]
# Check if there are more games past the current index
try:
dal.last_home_away_index_dict[team][0] = last_away_game.index[0]
except:
pass
if max(next_games_away_indexes) != dal.last_home_away_index_dict[team][0]:
next_game_index = min(i for i in next_games_away_indexes[skip_n+average_N:] if i > last_away_game.index)
next_game = df.loc[df.index == next_game_index]
next_games_home_indexes = df.loc[df['Team_home'] == next_game['Team_home'].values[0]].index
if next_game_index in next_games_home_indexes[skip_n+average_N:]:
extract_and_predict(next_game)
# Find all games where "team" plays home
next_games_home_indexes = df.loc[df['Team_home'] == team].index
last_home_game = last_N_games_home[dal.teams_to_int[team]][-1:]
# Check if there are more games past the current index
try:
dal.last_home_away_index_dict[team][1] = last_home_game.index[0]
except:
pass
if max(next_games_home_indexes) != dal.last_home_away_index_dict[team][1]:
next_game_index = min(i for i in next_games_home_indexes[skip_n+average_N:] if i > last_home_game.index)
next_game = df.loc[df.index == next_game_index]
next_games_away_indexes = df.loc[df['Team_away'] == next_game['Team_away'].values[0]].index
if next_game_index in next_games_away_indexes[skip_n+average_N:]:
extract_and_predict(next_game)
print(f'Evaluated samples: {len(predictions)}')
# Evaluate the predictions
data = {
'index' : evaluated_indexes,
'Date' : dates_list,
'Team_away' : away_teams_list,
'Team_home' : home_teams_list,
'Predictions' : predictions,
'Winner' : winners,
'ModelProbability' : model_prob,
'ModelOdds' : model_odds,
'OddsHome' : odds_home,
'OddsAway' : odds_away
}
forest_df = | pd.DataFrame(data) | pandas.DataFrame |
"""Pytest unit tests for the core module of GuideMaker
"""
import os
import pytest
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import numpy as np
import pandas as pd
from typing import List, Dict, Tuple, TypeVar, Generator
from Bio import Seq
import altair as alt
from pathlib import Path
import guidemaker
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
#configpath="guidemaker/data/config_default.yaml"
from guidemaker.definitions import ROOT_DIR
configpath = os.path.join(ROOT_DIR,"data","config_default.yaml")
#PamTarget Class
def test_pam_pam():
pamobj = guidemaker.core.PamTarget("NGG", "5prime")
assert getattr(pamobj, "pam") == "NGG"
def test_pam_orientation():
pamobj = guidemaker.core.PamTarget("GATN", "3prime")
assert getattr(pamobj, "pam_orientation") == "3prime"
pamobj = guidemaker.core.PamTarget("NGG", "5prime")
def test_pam_find_targets_5p():
pamobj = guidemaker.core.PamTarget("NGG", "5prime")
testseq1 = [SeqRecord(Seq.Seq("AATGATCTGGATGCACATGCACTGCTCCAAGCTGCATGAAAA",
alphabet=IUPAC.ambiguous_dna), id="testseq1")]
target = pamobj.find_targets(seq_record_iter=testseq1, target_len=6)
assert target['target'][0] == "ATGCAC"
assert target['target'][1] == "AGCAGT"
def test_pam_find_targets_3p():
pamobj = guidemaker.core.PamTarget("NGG", "3prime")
testseq1 = [SeqRecord(Seq.Seq("AATGATCTGGATGCACATGCACTGCTCCAAGCTGCATGAAAA",
alphabet=IUPAC.ambiguous_dna), id="testseq1")]
target = pamobj.find_targets(seq_record_iter=testseq1, target_len=6)
assert target['target'][0] == "ATGATC"
assert target['target'][1] == "GCAGCT"
def test_pam_find_targets_fullgenome():
file =os.path.join(TEST_DIR, "test_data","Carsonella_ruddii.fasta")
pamobj = guidemaker.core.PamTarget("NGG", "5prime")
#gb = SeqIO.parse("forward.fasta", "fasta")
gb = SeqIO.parse(file, "fasta")
target = pamobj.find_targets(seq_record_iter=gb, target_len=20)
assert target['target'][0] == "AAATGGTACGTTATGTGTTA"
tardict = {'target': ['ATGCACATGCACTGCTGGAT','ATGCAAATTCTTGTGATCCA','CAAGCACTGCTGGATCACTG'],
'exact_pam': ["AGG","TGG","CGG"],
'start': [410, 1050, 1150],
'stop': [430, 1070, 1170],
           'strand': [True, True, False], # forward = True, reverse = False
           'pam_orientation': [False, False, False], # 5prime = True, 3prime = False
'seqid': ['AP009180.1','AP009180.2','AP009180.1'],
'seedseq': [np.nan, np.nan, np.nan],
'isseedduplicated': [np.nan, np.nan, np.nan]}
targets = | pd.DataFrame(tardict) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# IMPORTS
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy import ndimage
import matplotlib.pyplot as plt
from skimage.io import imread
from rnaloc import toolbox
# Function definition
def process_folder(path_scan,
region_label,
bin_prop,
annotation_file='annotation.json',
output_path='acquisition>>analysis',
callback_log=None,
callback_status=None,
callback_progress=None):
"""[summary]
TODO: add docstring
Parameters
----------
path_scan : [type]
[description]
annotation_file : str, optional
[description], by default 'annotation.json'
"""
# Print all input parameters
toolbox.log_message(f"Function (process_folder) called with: {str(locals())} ", callback_fun=callback_log)
    # Create bins for the histogram
bins_hist = np.arange(bin_prop[0], bin_prop[1], bin_prop[2])
bins_width = 0.8 * (bins_hist[1] - bins_hist[0])
bins_center = (bins_hist[:-1] + bins_hist[1:]) / 2
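    # Illustrative example (values assumed, not from the original data): with
    # bin_prop = (0, 100, 20), bins_hist = [0, 20, 40, 60, 80], the bar width is
    # 0.8 * 20 = 16 and bins_center = [10, 30, 50, 70].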
for p_annotation in path_scan.rglob(f'*{annotation_file}*'):
# Get sample path
path_sample = p_annotation.parents[0]
toolbox.log_message(' ', callback_fun=callback_log)
toolbox.log_message(f'>> Analyzing {p_annotation}', callback_fun=callback_log)
# Open annotation
file_read = path_sample / annotation_file
if not p_annotation.is_file():
print(f'Annotation not found: {p_annotation}')
return
data_json, img_size = toolbox.read_annotation(p_annotation)
# Calculate distance transform for each annotated cell
# Note that coordinates are exchanged and y flipped
n_regs = 0
toolbox.log_message(f' [Create distance maps]: Loop over regions with label: {region_label}', callback_fun=callback_log)
if callback_status:
callback_status(f' [Create distance maps]: Loop over regions with label: {region_label}')
n_feats = len(data_json['features'])
for feat_ind, feat in enumerate(tqdm(data_json['features'], total=n_feats)):
label = feat['properties']['label']
if callback_progress:
callback_progress(feat_ind/n_feats)
if label == region_label:
reg_pos = np.squeeze(np.asarray(feat['geometry']['coordinates']))
reg_pos[:, [0, 1]] = reg_pos[:, [1, 0]]
reg_pos[:, 0] = -1*reg_pos[:, 0]+img_size[0]
mask_loop = toolbox.make_mask(reg_pos, img_size)
dist_nuc = ndimage.distance_transform_edt(np.logical_not(mask_loop))
if n_regs == 0:
dist_mat = np.copy(dist_nuc.astype('uint16'))
reg_masks = np.copy(mask_loop.astype('bool'))
else:
dist_mat = np.dstack((dist_mat, dist_nuc.astype('uint16')))
reg_masks = np.dstack((reg_masks, mask_loop.astype('bool')))
n_regs += 1
else:
toolbox.log_message(f' Label will not be processed: {label}',
callback_fun=callback_log)
toolbox.log_message(f' Number of annotated regions: {n_regs}',
callback_fun=callback_log)
if n_regs == 0:
toolbox.log_message(f'WARNING.\nNO regions with label "{region_label}"" found. Is this label correct?',
callback_fun=callback_log)
continue
# Loop over all FQ result files
for p_fq in path_sample.glob('*_spots*'):
toolbox.log_message(f' \nOpening FQ file: {p_fq}', callback_fun=callback_log)
# Get information (path, file name) to save results
file_base = p_fq.stem
# Load FQ results file
fq_dict = toolbox.read_FQ_matlab(p_fq)
spots_all = toolbox.get_rna(fq_dict)
# XY positions in pixel
if len(spots_all) == 0:
toolbox.log_message(f'No RNAs detected in this file.', callback_fun=callback_log)
continue
else:
pos_rna = np.divide(spots_all[:, 0:2], fq_dict['settings']['microscope']['pix_xy']).astype(int)
# Open FISH image
file_FISH_img = path_sample / fq_dict['file_names']['smFISH']
toolbox.log_message(f' Reading FISH image: {file_FISH_img}',callback_fun=callback_log)
img_FISH = imread(file_FISH_img)
# Folder to save results
path_save_base = toolbox.create_output_path(path_sample, output_path)
toolbox.log_message(f' Results will be saved here: {path_save_base}', callback_fun=callback_log)
path_save = path_save_base / 'analysis__cell_env' / file_base
toolbox.log_message(f' Results will be saved in folder: {path_save}',
callback_fun=callback_log)
if not path_save.is_dir():
path_save.mkdir(parents=True)
path_save_details = path_save / 'per_region'
if not path_save_details.is_dir():
path_save_details.mkdir(parents=True)
# Matrix with distance to all nuclei: each RNA is one row
rna_dist_regs_all = dist_mat[pos_rna[:, 0], pos_rna[:, 1], :]
# Sort matrix with shortest distance in first column
ind_closest_regs = np.argsort(rna_dist_regs_all, axis=1) # Index with sorted distance to nuclei
# Get for each RNA closest nuclei: index and distance
dist_closest_regs = np.take_along_axis(rna_dist_regs_all, ind_closest_regs, axis=1)
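            # Sketch of the bookkeeping above: argsort orders, per RNA (row), the region
            # indices by increasing distance, and take_along_axis reorders the distances the
            # same way, so column 0 holds each RNA's closest region and its distance.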
df_rna_dist = pd.DataFrame({'region_label': ind_closest_regs[:, 0],
'dist': dist_closest_regs[:, 0]
})
df_hist_RNA_all = pd.DataFrame({'bins_center': bins_center})
df_hist_PIX_all = | pd.DataFrame({'bins_center': bins_center}) | pandas.DataFrame |
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
# Copyright 2016 Feather Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
from feather.compat import guid
from feather import FeatherReader, FeatherWriter
import feather
def random_path():
return 'feather_{}'.format(guid())
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(feather.FeatherError):
FeatherReader('test_invalid_file')
def _check_pandas_roundtrip(self, df, expected=None):
path = random_path()
self.test_files.append(path)
feather.write_dataframe(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = feather.read_dataframe(path)
if expected is None:
expected = df
assert_frame_equal(result, expected)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
feather.write_dataframe(df, path)
reader = feather.FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
feather.write_dataframe(df, path)
reader = feather.FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = feather.read_dataframe(path)
assert_frame_equal(result, ex_frame)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(info.min,
min(info.max, np.iinfo('i8').max),
size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = feather.read_dataframe(path)
assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = feather.read_dataframe(path)
assert_frame_equal(result, ex_frame)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
self._check_pandas_roundtrip(df)
def test_strings(self):
repeats = 1000
values = [b'foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
values = ['foo', None, u'bar', 'qux', None]
expected = pd.DataFrame({'strings': values * repeats})
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits are False by default; RefPerc is True by default
>>> # the assignment of MCategory according to FixedLimitsHigh/Low is always carried out ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True if 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
import math
import sys
from copy import deepcopy
from itertools import chain
import scipy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftSize: the difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
windowSize must be an even number
for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
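# A minimal usage sketch for getDerivative (illustrative only, not part of the original module;
# the 1-second index and the column name 'Value' are assumptions): a linear ramp sampled every
# second yields dValueDt == 1.0 1/s; with the defaults a 60-sample rolling mean filters the result.
def _exampleGetDerivative():
    idx = pd.date_range('2021-01-01', periods=120, freq='S')
    df = pd.DataFrame({'Value': np.linspace(0., 119., 120)}, index=idx)
    # the returned df contains the new cols dt, dValue, dValueDt and dValueDtFiltered
    return getDerivative(df, 'Value', shiftSize=1, windowSize=60)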
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns the max. pMin for all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
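# Sketch (the two-node frame below is a hand-made assumption, not real model data): the CVD name
# 'K007~K008' is split into its nodes and the maximum pMin over those nodes is returned.
def _exampleGetMaxpMinFromName():
    dfNodes = pd.DataFrame({'NODEsName': ['K007', 'K008'], 'pMin': [2.5, 3.0], 'pMinMlc': [700., 750.]})
    return fgetMaxpMinFromName('K007~K008', dfNodes)  # -> 3.0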
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
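# Sketch (timestamps are illustrative assumptions): three 12-minute sections covering 36 minutes;
# timeStart, timeEnd or timeSpan may alternatively be given as an int meaning a section count.
def _exampleGenTimespans():
    t0 = pd.Timestamp('2021-03-19 00:00:00')
    return genTimespans(timeStart=t0
                        ,timeEnd=t0 + pd.Timedelta('36 Minutes')
                        ,timeSpan=pd.Timedelta('12 Minutes'))  # -> 3 (start, end) tuples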
def gen2Timespans(
timeStart # Anfang eines "Prozesses"
,timeEnd # Ende eines "Prozesses"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) und timeEnd dito
):
"""
generates 2 time ranges of equal length
1 around timeStart
1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
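# Sketch (illustrative assumption): a 5-minute pair and a 2-minute pair sum up to 7 when
# pd.Timedelta('1 minute') is passed as denominator.
def _exampleTotalTimeFromPairs():
    t = pd.Timestamp('2021-03-19 01:00:00')
    pairs = [(t, t + pd.Timedelta('5 Minutes'))
             ,(t + pd.Timedelta('10 Minutes'), t + pd.Timedelta('12 Minutes'))]
    return fTotalTimeFromPairs(pairs, denominator=pd.Timedelta('1 minute'))  # -> 7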
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# paarweise über alle Zeilen
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not row1Value and row2Value:
tEin=i2
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif row1Value and not row2Value:
if tEin != None:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # sonst: Bed. ist jetzt Aus und war nicht Ein
# Bed. kann nur im ersten Fall Ein gehen
# wenn 1 x und 2 x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# letztes Paar
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
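# Sketch (illustrative only; column name 'col' and the value 46 follow the default fct): the
# function returns the time spans in which the condition holds, here one single (start, end) pair.
def _exampleFindAllTimeIntervalls():
    idx = pd.date_range('2021-03-19 01:02:01', periods=4, freq='S')
    df = pd.DataFrame({'col': [0, 46, 46, 0]}, index=idx)
    return findAllTimeIntervalls(df)  # -> [(01:02:02, 01:02:03)]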
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
# if fct:
# find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; True solitaires are not lost but are returned as a pair (t,t)
# True solitaires are ONLY contained if s holds just 1 value and that value is True; the 1 returned pair then contains the solitaire timestamp for both times
# tdAllowed can be specified
# afterwards the ranges are combined into time ranges that lie no more than tdAllowed apart; these time ranges are then returned
# if fct None:
# tdAllowed must be specified
# split into time ranges that lie no more than the threshold tdAllowed apart; these time ranges are returned
# in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for this
# because no time range contained in s shall be lost
# if s holds just 1 value, 1 time pair with the same timestamp for both times is returned, if the value is not None
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 Paar mit selben Zeiten wenn das 1 Element Wahr
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 Paar mit selben Zeiten wenn das 1 Element nicht None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# paarweise über alle Zeiten
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# beim ersten Paar "geht Ein"
pass
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singulaeres Ereignis
# Paar mit selben Zeiten
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # geht Aus ohne Ein zu sein
if idx > 0: # Info
pass
else:
# im ersten Paar
pass
# wenn 1 x und 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# Behandlung letztes Paar
# bleibt Ein am Ende der Series: Paar speichern
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# Behandlung tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# paarweise über alle Zeiten
# neues Paar beginnen
anzInPair=1 # Anzahl der Zeiten in aktueller Zeitspanne
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # Zeit zwischen 2 Zeiten > als Schwelle: Zeitspanne ist abgeschlossen
if tEin==None:
# erstes Paar liegt bereits > als Schwelle auseinander
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
# aktuelle Zeitspanne beginnt beim 1. Wert und geht über Schwellwert
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# Zeitspanne abschließen
tPair=(tEin,i1)
tPairs.append(tPair)
# neue Zeitspanne beginnen
tEin=i2
anzInPair=1
else:
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
anzInPair=2
else: # Zeitspanne zugelassen, weiter ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# letztes Zeitpaar behandeln
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# ein letzter Wert wuerde ueber bleiben, letzte Zeitspanne verlängern ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # Folgepaar in vorheriges Paar integrieren
tPairs.remove(tp2) # Folgepaar löschen
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # Rekursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
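# Sketch (illustrative assumption): two pairs whose gap equals tdAllowed are merged into one pair.
def _exampleCombineSubsequenttPairs():
    t = pd.Timestamp('2021-03-19 01:02:00')
    pairs = [(t, t + pd.Timedelta('2 seconds'))
             ,(t + pd.Timedelta('3 seconds'), t + pd.Timedelta('5 seconds'))]
    return fCombineSubsequenttPairs(pairs, tdAllowed=pd.Timedelta('1 second'))  # -> [(t, t + 5 seconds)]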
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameter und Funktionen LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# Colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
# Colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated are: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command or state)
# the commands and states (the values of colRegExEventID) must be defined below in order to define the marker (of the command or the state)
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value for commands (==> eventCCmds)
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# Derivation of a DIVPipelineName from PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# Derivation of a DIVPipelineName from SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
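# Sketch (illustrative assumption): the SEGName '6_AAD_41_OHV1' is reduced to the DIVPipelineName '6_41'.
def _exampleDIVNameFromSEGName():
    return fDIVNameFromSEGName('6_AAD_41_OHV1')  # -> '6_41'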
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and Druck results: every result PV of a vector yields the base valid for all result PVs of the vector
i.e. the result PVs of a vector differ only at the end
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
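# Sketch (illustrative only): the special case '6_AAD_41_OHV1' is mapped back to '6_AAD_41_OHN'
# before the result base ID is built from it.
def _exampleGetSEGBaseIDFromSEGName():
    return fGetSEGBaseIDFromSEGName('6_AAD_41_OHV1')  # -> 'Objects.3S_FBG_SEG_INFO.3S_L_6_AAD_41_OHN.In.'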
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df mit ODI Parametrierungsdaten
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # nur ergIDs, fuer die 2ndPatternPat zutrifft liefern
):
"""
returns string
IDs from dfODI, separated by strSep, which contain baseID (and, if pattern is True, match patternPat)
baseID (and group(0) of patternPat if pattern is True) are removed from the IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
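# Sketch (the two ODI index entries and the dummy column are pure assumptions for illustration):
# with pattern=False the non-IMDI ID containing the base is returned with the base stripped;
# with pattern=True only the 'IMDI.' entries would be returned (prefix and base stripped).
def _exampleGetErgIDsFromBaseID():
    base = 'Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
    dfODI = pd.DataFrame({'dummy': [1, 2]}, index=[base + 'AL_S', 'IMDI.' + base + 'MZ_AV'])
    return fGetErgIDsFromBaseID(baseID=base, dfODI=dfODI, pattern=False)  # -> 'AL_S '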
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences) with edge and node data as well as parameterization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; Schluessel in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- Einlesen Modell
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- Segmente ermitteln
# --- per Modell
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- nur per LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- Segmentkantenzuege ermitteln
dfSegsNodeLst={} # nur zu Kontrollzwecken
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (pd.isnull(dfSegsNodesNDataDpkt['pMin']))][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin']))][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
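# Call sketch (the paths are the function defaults and stand for a concrete project
# layout; dfSNND is a hypothetical name reused in the sketches further below):
#   dfSNND=dfSegsNodesNDataDpkt(
#        VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
#       ,Model=r"MDBDOC\FBG.mdb"
#   )
#   # one row per (SEGName, node); LDSPara/ODI/LDSParaPT columns are merged in where available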
def fResValidSeriesSTAT_S(x): # STAT_S
if pd.isnull(x)==False:
if x >=0:
return True
else:
return False
else:
return False
def fResValidSeriesSTAT_S601(x): # STAT_S
if pd.isnull(x)==False:
if x==601:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S(x,value=20): # AL_S
if pd.isnull(x)==False:
if x==value:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S10(x):
return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
return fResValidSeriesAL_S(x,value=3)
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
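# Sketch of the mapping defined by the three parallel lists above (values are examples):
#   'Zustaendig' <- fResValidSeriesSTAT_S:    STAT_S >= 0    e.g. fResValidSeriesSTAT_S(0)      -> True
#   'Alarm'      <- fResValidSeriesAL_S:      AL_S == 20     e.g. fResValidSeriesAL_S(20)       -> True
#   'Stoerung'   <- fResValidSeriesSTAT_S601: STAT_S == 601  e.g. fResValidSeriesSTAT_S601(601) -> True
#   NaN never matches any of the predicates.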
# (fast) alle verfuegbaren Erg-Kanaele
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # Schieberfarben
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
# alle Basisfarben außer y gelb
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # Schiebersymbole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
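# Usage sketch (ResIDBase is the example from the signature comment; lx, timeStart
# and timeEnd are placeholders for an Lx.AppLog instance and a time window):
#   dfSegReprVec=getLDSResVecDf(
#        ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
#       ,LDSResBaseType='SEG'
#       ,lx=lx
#       ,timeStart=timeStart,timeEnd=timeEnd
#   )
#   # the returned columns carry the bare channel names ('AL_S','STAT_S',...), i.e. the ID suffixes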
def fGetResTimes(
ResIDBases=[] # Liste der Wortstaemme der Ergebnisvektoren
,df=pd.DataFrame() # TCsLDSRes...
,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # Liste der Ergebnisvektoren Postfixe
,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # Liste der Ergebnisvektoren Funktionen
,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # Liste der key-Namen der Ergebnisse
,tdAllowed=pd.Timedelta('1 second') # erlaubte Zeitspanne zwischen geht und kommt (die beiden an diese Zeitspanne angrenzenden Zeitbereiche werden als 1 Zeit gewertet)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
Value: Liste mit Zeitpaaren (oder leere Liste)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
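# Result sketch for fGetResTimes (structure only, the times are invented):
#   { 'Objects....In.': { 'Zustaendig': [(t0,t1),(t2,t3)]
#                        ,'Alarm':      [(tA,tE)]
#                        ,'Stoerung':   [] } }
#   # each value is a list of (start,end) Timestamp pairs as produced by
#   # findAllTimeIntervallsSeries; an empty list means the channel was missing in df
#   # or its predicate never matched.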
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
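# Call sketch (h5File and the shift are placeholders; dfSNND is the hypothetical
# result of dfSegsNodesNDataDpkt() from the sketch above):
#   TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(
#        h5File='a.h5'
#       ,dfSegsNodesNDataDpkt=dfSNND
#       ,timeShiftPair=(1,'H')   # z.B. bei Replay
#   )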
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
# Alarm geht ... Alarm kommt (wieder): wenn Zeitspanne ... <= tdAllowed, dann wird dies als dieselbe Alarmzeitspanne gewertet
# d.h. es handelt sich - so gewertet - inhaltlich um denselben Alarm
# None zählt die Alarme strikt getrennt
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.)
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
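# Call sketch (dfSNND as above): condenses the raw result vectors into time-pair
# dictionaries; tdAllowed merges alarm intervals separated by at most that gap:
#   SEGResDct,DruckResDct=processAlarmStatistikData(
#        TCsLDSRes1,TCsLDSRes2,dfSNND
#       ,tdAllowed=pd.Timedelta('1 second')   # None: count alarms strictly separately
#   )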
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct={}
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
sortiert und direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen zusammengefasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
voneinander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
SEGDruckResDct={}
# merken, ob eine ID bereits bei einem SEG gezählt wurde; die Alarme einer ID sollen nur bei einem SEG gezaehlt werden
IDBereitsGezaehlt={}
# über alle DruckErgs
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
# voneinander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
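# Call sketch (dfSNND as above): condenses the pressure-side results onto segments;
# an ID that belongs to several SEGs is counted for one SEG only (see IDBereitsGezaehlt):
#   SEGDruckResDct=processAlarmStatistikData2(DruckResDct,TCsLDSRes2,dfSNND)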
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,SEGResDct={}
,DruckResDct={}
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR']
,NrAsc=[False]+4*[True]
):
"""
Returns dfAlarmStatistik,dfAlarmEreignisse,SEGDruckResDct
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmStatistik=pd.DataFrame()
dfAlarmEreignisse=pd.DataFrame()
try:
# Ereignisse
dfAlarmEreignisse=buildDfAlarmEreignisse(
SEGResDct=SEGResDct
,DruckResDct=DruckResDct
,TCsLDSRes1=TCsLDSRes1
,TCsLDSRes2=TCsLDSRes2
,dfCVDataOnly=dfCVDataOnly
,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
,replaceTup=replaceTup
,NrBy=NrBy
,NrAsc=NrAsc
)
# in dfAlarmEreignisse erzeugte Alarm-Nr. an Dct merken
SEGResDct,DruckResDct=addNrToAlarmStatistikData(
SEGResDct
,DruckResDct
,dfAlarmEreignisse
)
# BZKat der Alarme
def fGetAlarmKat(row):
"""
"""
# baseID des Alarms
baseID=row['OrteIDs'][0]
# dct des Alarms
if row['LDSResBaseType']=='SEG':
dct=SEGResDct[baseID]
else:
dct=DruckResDct[baseID]
# Nrn der baseID
Nrn=dct['AL_S_NR']
# idx dieses Alarms innerhalb der Alarme der baseID
idxAl=Nrn.index(row['Nr'])
# Zustaende dieses alarms
SB_S=dct['AL_S_SB_S'][idxAl]
kat=''
if 3 in SB_S:
kat='instationär'
else:
if 2 in SB_S:
kat = 'schw. instationär'
else:
if 1 in SB_S:
kat = 'stat. Fluss'
elif 4 in SB_S:
kat = 'stat. Ruhe'
return kat
dfAlarmEreignisse['BZKat']=dfAlarmEreignisse.apply(lambda row: fGetAlarmKat(row),axis=1)
# Segment-verdichtete Druckergebnisse
SEGDruckResDct=processAlarmStatistikData2(
DruckResDct
,TCsLDSRes2
,dfSegsNodesNDataDpkt
)
# Alarmstatistik bilden
dfAlarmStatistik=dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]
dfAlarmStatistik=dfAlarmStatistik[['DIVPipelineName','SEGName','SEGNodes','SEGResIDBase']].drop_duplicates(keep='first').reset_index(drop=True)
dfAlarmStatistik['Nr']=dfAlarmStatistik.apply(lambda row: "{:2d}".format(int(row.name)),axis=1)
# SEG
dfAlarmStatistik['FörderZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Zustaendig'])
dfAlarmStatistik['FörderZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Alarm'])
dfAlarmStatistik['FörderZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Stoerung'])
dfAlarmStatistik['FörderZeitenAlAnz']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['FörderZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_SB_S'])
dfAlarmStatistik['FörderZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_NR'])
# Druck (SEG-verdichtet)
dfAlarmStatistik['RuheZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Zustaendig'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Alarm'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Stoerung'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_SB_S'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_NR'] if x in SEGDruckResDct.keys() else [])
#dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAlNrn'].apply(lambda x: len(x))
# je 3 Zeiten bearbeitet
dfAlarmStatistik['FörderZeit']=dfAlarmStatistik['FörderZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeit']=dfAlarmStatistik['RuheZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitAl']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitAl']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitSt']=dfAlarmStatistik['FörderZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitSt']=dfAlarmStatistik['RuheZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse, dfAlarmStatistik,SEGDruckResDct
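# Typical call chain (sketch; dfSNND and the h5 file name are placeholders from the
# sketches above):
#   TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(h5File='a.h5',dfSegsNodesNDataDpkt=dfSNND)
#   SEGResDct,DruckResDct=processAlarmStatistikData(TCsLDSRes1,TCsLDSRes2,dfSNND)
#   dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct=buildAlarmDataframes(
#        TCsLDSRes1=TCsLDSRes1,TCsLDSRes2=TCsLDSRes2
#       ,dfSegsNodesNDataDpkt=dfSNND,dfCVDataOnly=dfCVDataOnly
#       ,SEGResDct=SEGResDct,DruckResDct=DruckResDct)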
def plotDfAlarmStatistik(
dfAlarmStatistik=pd.DataFrame()
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=dfAlarmStatistik[[
'Nr'
,'DIVPipelineName'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'FörderZeitSt'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
,'RuheZeitSt'
]].copy()
# diese Zeiten um (Störzeiten) annotieren
df['FörderZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['FörderZeit'],row['FörderZeitSt']) if row['FörderZeitSt'] > 0. else row['FörderZeit'] ,axis=1)
df['RuheZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['RuheZeit'],row['RuheZeitSt']) if row['RuheZeitSt'] > 0. else row['RuheZeit'],axis=1)
# LfdNr. annotieren
df['LfdNr']=df.apply(lambda row: "{:2d} - {:s}".format(int(row.Nr)+1,str(row.DIVPipelineName)),axis=1)
# Zeiten Alarm um Alarm-Nrn annotieren
def fAddZeitMitNrn(zeit,lAlNr):
if len(lAlNr) > 0:
if len(lAlNr) <= 3:
return "{!s:s} (Nrn.: {!s:s})".format(zeit,lAlNr)
else:
# mehr als 3 Alarme...
return "{!s:s} (Nrn.: {!s:s}, ...)".format(zeit,lAlNr[0])
else:
return zeit
df['FörderZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['FörderZeitAl'],row['FörderZeitenAlNrn']),axis=1)
df['RuheZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['RuheZeitAl'],row['RuheZeitenAlNrn']),axis=1)
df=df[[
'LfdNr'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
]]
try:
t=plt.table(cellText=df.values, colLabels=df.columns, loc='center')
cols=df.columns.to_list()
colIdxLfdNr=cols.index('LfdNr')
colIdxFoerderZeit=cols.index('FörderZeit')
colIdxFoerderZeitenAlAnz=cols.index('FörderZeitenAlAnz')
colIdxFoerderZeitAl=cols.index('FörderZeitAl')
colIdxRuheZeit=cols.index('RuheZeit')
colIdxRuheZeitenAlAnz=cols.index('RuheZeitenAlAnz')
colIdxRuheZeitAl=cols.index('RuheZeitAl')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 ist die Ueberschriftszeile; col beginnt bei 0
if row == 0:
if col in [colIdxRuheZeit,colIdxRuheZeitenAlAnz,colIdxRuheZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='plum')
elif col in [colIdxFoerderZeit,colIdxFoerderZeitenAlAnz,colIdxFoerderZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='lightsteelblue')
if col == colIdxLfdNr:
if row==0:
continue
if 'color' in dfAlarmStatistik.columns.to_list():
color=dfAlarmStatistik['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
if col == colIdxFoerderZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'FörderZeitSt']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxFoerderZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Förderzeit
if df.loc[row-1,'FörderZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # palegoldenrod
#if df.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
if col == colIdxRuheZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'RuheZeitSt']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxRuheZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Ruhezeit
if df.loc[row-1,'RuheZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
pass
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # # palegoldenrod
#if df.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def fOrteStripped(LDSResBaseType,OrteIDs):
"""
returns Orte stripped
"""
if LDSResBaseType == 'SEG': # 'Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.']
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_'+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
elif LDSResBaseType == 'Druck': # Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
else:
return None
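# Example (IDs taken from the comments above; the exact output depends on the group
# definitions in Lx.pID, so the results are indicative only):
#   fOrteStripped('SEG',['Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.'])
#   # -> e.g. ['6_MHV_02_FUD']
#   fOrteStripped('Druck',['Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In.'])
#   # -> e.g. ['6_BNV_01_PTI01']
#   # an unknown LDSResBaseType returns None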
def fCVDTime(row,dfSEG,dfDruck,replaceTup=('2021-','')):
"""
in:
dfSEG/dfDruck: TCsLDSRes1/TCsLDSRes2
row: Zeile aus dfAlarmEreignisse
von row verwendet:
LDSResBaseType: SEG (dfSEG) oder nicht (dfDruck)
OrteIDs: ==> ID von ZHKNR_S in dfSEG/dfDruck
ZHKNR: ZHKNR
returns:
string: xZeitA - ZeitEx
ZeitA: erste Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
ZeitE: letzte Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
xZeitA, wenn ZeitA die erste Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
xZeitE, wenn ZeitE die letzte Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
in Zeit wurde replaceTup angewendet
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
Time=""
ID=row['OrteIDs'][0]+'ZHKNR_S'
ZHKNR=row['ZHKNR']
if row['LDSResBaseType']=='SEG':
df=dfSEG
else:
df=dfDruck
s=df[df[ID]==ZHKNR][ID] # eine Spalte; Zeilen in denen ZHKNR_S den Wert von ZHKNR trägt
tA=s.index[0] # 1. Zeit
tE=s.index[-1] # letzte Zeit
Time=" {!s:s} - {!s:s} ".format(tA,tE)
try:
if tA==df[ID].dropna().index[0]:
Time='x'+Time.lstrip()
except:
logger.debug("{0:s}Time: {1:s}: x-tA Annotation Fehler; keine Annotation".format(logStr,Time))
try:
if tE==df[ID].dropna().index[-1]:
Time=Time.rstrip()+'x'
except:
logger.debug("{0:s}Time: {1:s}: x-tE Annotation Fehler; keine Annotation".format(logStr,Time))
Time=Time.replace(replaceTup[0],replaceTup[1])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return Time
def buildDfAlarmEreignisse(
SEGResDct={}
,DruckResDct={}
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] # Sortierspalten für die Nr. der Ereignisse
,NrAsc=[False]+4*[True] # aufsteigend j/n für die o.g. Sortierspalten
):
"""
Returns dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
Voralarm: ermittelter Voralarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
Type: Typ des Kontrollraums; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
###BZKat: Betriebszustandskategorie des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmEreignisse=pd.DataFrame()
try:
AlarmEvents=[] # Liste von AlarmEvent
AlarmEventsOrte={} # dct der Orte, die diesen (key) AlarmEvent melden
AlarmEventsZHKNRn={} # dct der ZHKNRn, die zu diesem (key) gehoeren
# über SEG- und Druck-Ergebnisvektoren
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for ResIDBase,dct in ResDct.items():
AL_S=dct['Alarm']
if len(AL_S) > 0:
# eine Erg-ID weist Alarme [(tA,tE),...] auf
# korrespondierende Liste der ZHKs: [(999,1111),...]
ZHKNRnListen=dct['AL_S_ZHKNR_S']
ID=ResIDBase+'ZHKNR_S' # fuer nachfolgende Ausgabe
# ueber alle Alarme der Erg-ID
for idx,AL_S_Timepair in enumerate(AL_S):
(t1,t2)=AL_S_Timepair # tA, tE
ZHKNR_S_Lst=ZHKNRnListen[idx] # Liste der ZHKs in dieser Zeit
if len(ZHKNR_S_Lst) != 1:
logger.warning(("{:s}ID:\n\t {:s}: Alarm {:d} der ID\n\t Zeit von {!s:s} bis {!s:s}:\n\t Anzahl verschiedener ZHKNRn !=1: {:d} {:s}:\n\t ZHKNR eines Alarms wechselt waehrend eines Alarms. Alarm wird identifiziert mit 1. ZHKNR.".format(logStr,ID
,idx
,t1
,t2
,len(ZHKNR_S_Lst)
,str(ZHKNR_S_Lst)
)))
# die erste wird verwendet
ZHKNR=int(ZHKNR_S_Lst[0])
# AlarmEvent erzeugen
alarmEvent=AlarmEvent(t1,t2,ZHKNR,LDSResBaseType)
if alarmEvent not in AlarmEvents:
# diesen Alarm gibt es noch nicht in der Ereignisliste ...
AlarmEvents.append(alarmEvent)
AlarmEventsOrte[alarmEvent]=[]
AlarmEventsZHKNRn[alarmEvent]=[]
else:
pass
# Ort ergaenzen (derselbe Alarm wird erst ab V83.5.3 nur an einem Ort - dem lexikalisch kleinsten des Bilanzraumes - ausgegeben; zuvor konnte derselbe Alarm an mehreren Orten auftreten)
AlarmEventsOrte[alarmEvent].append(ResIDBase)
# ZHKNR(n) ergaenzen (ein Alarm wird unter 1 ZHKNR geführt)
AlarmEventsZHKNRn[alarmEvent].append(ZHKNR_S_Lst)
# df erzeugen
dfAlarmEreignisse=pd.DataFrame.from_records(
[alarmEvent for alarmEvent in AlarmEvents],
columns=AlarmEvent._fields
)
# Liste der EventOrte erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
l.append(AlarmEventsOrte[alarmEvent])
dfAlarmEreignisse['OrteIDs']=l
# abgekuerzte Orte
dfAlarmEreignisse['Orte']=dfAlarmEreignisse.apply(lambda row: fOrteStripped(row.LDSResBaseType,row.OrteIDs),axis=1)
dfAlarmEreignisse['Ort']=dfAlarmEreignisse['Orte'].apply(lambda x: x[0])
# Liste der ZHKNRn erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
lOfZl=AlarmEventsZHKNRn[alarmEvent]
lOfZ=[*{*chain.from_iterable(lOfZl)}]
lOfZ=sorted(pd.unique(lOfZ))
l.append(lOfZ)
dfAlarmEreignisse['ZHKNRn']=l
# Segmentname eines Ereignisses
dfAlarmEreignisse['SEGName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==row['OrteIDs'][0]]['SEGName'].iloc[0] if row['LDSResBaseType']=='SEG'
else [tuple for tuple in getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,row['OrteIDs'][0]) if not tuple[-1]][0][1],axis=1)
# DIVPipelineName eines Ereignisses
dfAlarmEreignisse['DIVPipelineName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DIVPipelineName'].iloc[0]
,axis=1)
# Alarm: ---
#tA: Anfangszeit
#tE: Endezeit
#ZHKNR: ZHKNR (1. bei mehreren Alarmen)
#LDSResBaseType: SEG oder Druck
# Orte: ---
#OrteIDs: OrteIDs des Alarms
#Orte: Kurzform von OrteIDs des Alarms
#ZHKNRn:
#SEGName: Segmentname
#DIVPipelineName
## Nr.
dfAlarmEreignisse.sort_values(by=NrBy,ascending=NrAsc,inplace=True)
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
#dfAlarmEreignisse['Nr']=dfAlarmEreignisse['Nr']+1
logger.debug("{0:s}{1:s}: {2:s}".format(logStr,'dfAlarmEreignisse',dfAlarmEreignisse.to_string()))
# Voralarm
VoralarmTypen=[]
for index, row in dfAlarmEreignisse.iterrows():
# zur Information bei Ausgaben
OrteIDs=row['OrteIDs']
OrtID=OrteIDs[0]
VoralarmTyp=None
try:
if row['LDSResBaseType']=='SEG':
VoralarmTyp=TCsLDSRes1.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
elif row['LDSResBaseType']=='Druck':
VoralarmTyp=TCsLDSRes2.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
except:
pass
if pd.isnull(VoralarmTyp): # == None: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=-1
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: kein (isnull) Vorlalarm gefunden?! (ggf. neutraler BRWechsel) - Voralarm gesetzt auf: {:d}".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA'],int(VoralarmTyp)))
if int(VoralarmTyp)==0: # == 0: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=0
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: Vorlalarm 0?! (ggf. war Bilanz in Stoerung)".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA']))
if int(VoralarmTyp) not in [-1,0,3,4,10]:
logger.warning("{:s}PV: {:s} Alarm Nr. {:d} {:d} tA {!s:s}: unbekannter Vorlalarm gefunden: {:d}".format(logStr,row['OrteIDs'][0],int(row['Nr']),row['ZHKNR'],row['tA'],int(VoralarmTyp)))
logger.debug("{:s}{:d} {!s:s} VoralarmTyp:{:d}".format(logStr,int(row['Nr']),row['tA'],int(VoralarmTyp)))
VoralarmTypen.append(VoralarmTyp)
dfAlarmEreignisse['Voralarm']=[int(x) for x in VoralarmTypen]
# Type (aus dfCVDataOnly) und Erzeugungszeit (aus dfCVDataOnly) und Name (aus dfCVDataOnly)
dfAlarmEreignisse['ZHKNR']=dfAlarmEreignisse['ZHKNR'].astype('int64')
dfAlarmEreignisse['ZHKNRStr']=dfAlarmEreignisse['ZHKNR'].astype('string')
dfCVDataOnly['ZHKNRStr']=dfCVDataOnly['ZHKNR'].astype('string')
# wg. aelteren App-Log Versionen in denen ZHKNR in dfCVDataOnly nicht ermittelt werden konnte
# Type,ScenTime,Name sind dann undefiniert
dfAlarmEreignisse=pd.merge(dfAlarmEreignisse,dfCVDataOnly,on='ZHKNRStr',suffixes=('','_CVD'),how='left').filter(items=dfAlarmEreignisse.columns.to_list()+['Type'
#,'ScenTime'
,'Name'])
dfAlarmEreignisse=dfAlarmEreignisse.drop(['ZHKNRStr'],axis=1)
dfAlarmEreignisse=dfAlarmEreignisse.fillna(value='')
# lfd. Nummern
dfAlarmEreignisse['NrSD']=dfAlarmEreignisse.groupby(['LDSResBaseType']).cumcount() + 1
dfAlarmEreignisse['NrName']=dfAlarmEreignisse.groupby(['Name']).cumcount() + 1
dfAlarmEreignisse['NrSEGName']=dfAlarmEreignisse.groupby(['SEGName']).cumcount() + 1
# Lebenszeit der ZHKNR
try:
dfAlarmEreignisse['tD_ZHKNR']=dfAlarmEreignisse.apply(lambda row: fCVDTime(row,TCsLDSRes1,TCsLDSRes2,replaceTup),axis=1)
except:
logger.debug("{:s}Spalte tD_ZHKNR (Lebenszeit einer ZHKNR) konnte nicht ermittelt werden. Vmtl. aeltere App-Log Version.".format(logStr))
dfAlarmEreignisse['tD_ZHKNR']='-1'
# Dauer des Alarms
dfAlarmEreignisse['tD']=dfAlarmEreignisse.apply(lambda row: row['tE']-row['tA'],axis=1)
dfAlarmEreignisse['tD']= dfAlarmEreignisse['tD'].apply(lambda x: "{!s:s}".format(x).replace('days','Tage').replace('0 Tage','').replace('Tage','T'))
# AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
dfAlarmEreignisse=dfAlarmEreignisse[['Nr','tA', 'tE','tD','ZHKNR','tD_ZHKNR','ZHKNRn','LDSResBaseType'
,'OrteIDs', 'Orte', 'Ort', 'SEGName','DIVPipelineName'
,'Voralarm', 'Type', 'Name'
,'NrSD', 'NrName', 'NrSEGName'
]]
dfAlarmEreignisse['AlarmEvent']=dfAlarmEreignisse.apply(lambda row: AlarmEvent(row['tA'],row['tE'],row['ZHKNR'],row['LDSResBaseType']),axis=1)
# unklar, warum erforderlich
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fCVDName(Name
):
"""
"""
lName=len(Name)
if len(Name)==0:
Name='ZHKName vmtl. nicht in Log'
lNameMaxH=20
if lName > 2*lNameMaxH:
Name=Name[:lNameMaxH-2]+'....'+Name[lName-lNameMaxH+2:]
Name=Name.replace('°','|')
return Name
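# Examples of the shortening behaviour (threshold: 2*lNameMaxH = 40 characters):
#   fCVDName('')            # -> 'ZHKName vmtl. nicht in Log'
#   fCVDName('A'*50)        # -> first 18 characters + '....' + last 18 characters
#   fCVDName('90° Bogen')   # -> '90| Bogen'  ('°' is replaced by '|')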
def plotDfAlarmEreignisse(
dfAlarmEreignisse=pd.DataFrame()
,sortBy=[]
,replaceTup=('2021-','')
,replaceTuptD=('0 days','')
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=dfAlarmEreignisse[['Nr','LDSResBaseType','Voralarm','Type','NrSD','tA','tE','tD','ZHKNR','Name','Orte','tD_ZHKNR','NrName','NrSEGName','SEGName','BZKat']].copy()
df['tA']=df['tA'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
df['tE']=df['tE'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
###df['Anz']=df['Orte'].apply(lambda x: len(x))
##df['Orte']=df['Orte'].apply(lambda x: str(x).replace('[','').replace(']','').replace("'",""))
df['Orte']=df['Orte'].apply(lambda x: str(x[0]))
df['LDSResBaseType']=df.apply(lambda row: "{:s} {:s} - {:d}".format(row['LDSResBaseType'],row['Type'],row['Voralarm']),axis=1)
df=df[['Nr','LDSResBaseType','NrSD','tA','tE','tD','ZHKNR','Name','NrName','NrSEGName','SEGName','tD_ZHKNR','Orte','BZKat']]
df.rename(columns={'LDSResBaseType':'ResTyp - Voralarm'},inplace=True)
df.rename(columns={'tD_ZHKNR':'ZHKZeit','Name':'ZHKName'},inplace=True)
###df['ZHKName']=df['ZHKName'].apply(lambda x: fCVDName(x))
####df['ZHKName']=df['Orte'].apply(lambda x: x[0])
df['NrSEGName (SEGName)']=df.apply(lambda row: "{!s:2s} ({!s:s})".format(row['NrSEGName'],row['SEGName']),axis=1)
df=df[['Nr','ResTyp - Voralarm','NrSD','tA','tD','ZHKNR'
,'Orte' #'ZHKName'
,'BZKat'
,'NrName','NrSEGName (SEGName)','ZHKZeit']]
df.rename(columns={'Orte':'ID'},inplace=True)
df['tD']=df['tD'].apply(lambda x: str(x).replace(replaceTuptD[0],replaceTuptD[1]))
def fGetZHKNRStr(row,dfOrig):
"""
returns:
ZHKNStr in Abhängigkeit der aktuellen Zeile und dfOrig
"""
s=dfOrig[dfOrig['Nr']==row['Nr']].iloc[0]
if len(s.ZHKNRn)>1:
if len(s.ZHKNRn)==2:
return "{:d} ({!s:s})".format(row['ZHKNR'],s.ZHKNRn[1:])
else:
return "{:d} (+{:d})".format(row['ZHKNR'],len(s.ZHKNRn)-1)
else:
return "{:d}".format(row['ZHKNR'])
df['ZHKNR']=df.apply(lambda row: fGetZHKNRStr(row,dfAlarmEreignisse),axis=1)
if sortBy!=[]:
df=df.sort_values(by=sortBy)
t=plt.table(cellText=df.values, colLabels=df.columns
,colWidths=[.03,.1 # Nr ResTyp-Voralarm
,.04 # NrSD
,.08,.08 # tA tD
,.085 # ZHKNR
,.1125,.07 #.1125 # ID BZKat
,.04 # NrName
,.14 # NrSEGName (SEGName)
,.2125] # ZHKZeit
, cellLoc='left'
, loc='center')
t.auto_set_font_size(False)
t.set_fontsize(10)
cols=df.columns.to_list()
#colIdxOrte=cols.index('Orte')
#colIdxName=cols.index('ZHKName')
colIdxNrSD=cols.index('NrSD')
colIdxNrSEG=cols.index('NrSEGName (SEGName)')
# ResTyp - Voralarm
colIdxResTypVA=cols.index('ResTyp - Voralarm')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 ist die Ueberschriftszeile; col beginnt bei 0
#if col == colIdxName:
# cellObj.set_text_props(ha='left')
if col == colIdxNrSD:
if row > 0:
if dfAlarmEreignisse.loc[row-1,'LDSResBaseType']=='SEG':
cellObj.set_text_props(backgroundcolor='lightsteelblue')
else:
cellObj.set_text_props(backgroundcolor='plum')
elif col == colIdxNrSEG:
if row==0:
continue
if 'color' in dfAlarmEreignisse.columns.to_list():
color=dfAlarmEreignisse['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
elif col == colIdxResTypVA and row > 0:
pass
if dfAlarmEreignisse.loc[row-1,'Voralarm'] in [10]:
cellObj.set_text_props(backgroundcolor='sandybrown')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [4]:
cellObj.set_text_props(backgroundcolor='pink')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [3]:
cellObj.set_text_props(backgroundcolor='lightcoral')
else:
pass
#cellObj.set_text_props(fontsize=16)
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def plotDfAlarmStatistikReportsSEGErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,SEGResDct={}
,timeStart=None,timeEnd=None
,SEGErgsFile='SEGErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H' # Runden (1 Stunde)
,timeFloorCeilStrDetailPre='6T' # Runden (3 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with FörderZeitenAlAnz>0
1 Base Plot and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr)
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
logger.debug("{0:s}timeStart (ohne timeShift): {1:s} timeEnd (ohne timeShift): {2:s}".format(logStr,str(timeStart),str(timeEnd)))
xlimsDct={}
pdf=PdfPages(SEGErgsFile)
(fileNameBase,ext)= os.path.splitext(SEGErgsFile)
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,row['SEGResIDBase'])
if row['FörderZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz: 0".format(titleStr))
continue # keine SEGs ohne Alarme drucken
# Erg lesen
ResIDBase=row['SEGResIDBase']
dfSegReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
ID='AL_S'
if ID not in dfSegReprVec.keys():
continue
idxSEGPlotted=idxSEGPlotted+1
xlimsDct[ResIDBase]=[]
logger.debug("{:s}ResIDBase: {:s} dfSegReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfSegReprVec.columns.to_list()))
# Plot Basis ###########################################################
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
,plotLegend=True
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
if row['FörderZeitenAlAnz'] > 0:
if row['FörderZeitenAlAnz'] <= 3:
txtNr=" Nrn.: {!s:s}".format(row['FörderZeitenAlNrn'])
else:
txtNr=" Nrn.: {!s:s} u.w.".format(row['FörderZeitenAlNrn'][0])
txt=txt+txtNr
else:
txtNr=''
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,txtNr.replace('Nrn.: ','Nrn ').replace(',','').replace('[','').replace(']','').replace('u.w.','u w'))
plt.savefig(fileName)
plt.show()
###plt.clf()
plt.close()
# Plot Alarme ###########################################################
dct=SEGResDct[row['SEGResIDBase']]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['FörderZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
# wenn AlarmRand - PlotRand < 3 Minuten: um 3 Minuten erweitern
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
txtNr=" Nr.: {!s:s}".format(AlNr)
txt=txt+txtNr
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
#logger.info("{:s}".format(titleStr))
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
#(fileName,ext)= os.path.splitext(SEGErgsFile)
fileNameAlarm="{:s} {:s}.png".format(fileName.replace('.png','')
,txtNr.replace('Nr.: ','Nr ').replace(',','').replace('[','').replace(']',''))
plt.savefig(fileNameAlarm)
plt.show()
###plt.clf()
plt.close()
###plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
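# --- Illustrative usage (sketch, not part of the original module) -----------
# Shows how this SEG report function is typically driven. The h5 file name,
# the alarm-statistics dataframe and SEGResDct are assumptions here - in a
# real run they come from the preceding alarm-statistics steps of this module.
def _demoPlotSEGReports(h5File='a.h5', dfAlarmStatistik=pd.DataFrame(), SEGResDct={}):
    xlimsDct = plotDfAlarmStatistikReportsSEGErgs(
        h5File=h5File
        ,dfAlarmStatistik=dfAlarmStatistik
        ,SEGResDct=SEGResDct
        ,SEGErgsFile='SEGErgs.pdf'
        ,stopAtSEGNr=1          # plot only the 1st SEG with alarms, for a quick check
        ,timeFloorCeilStr='1H'  # round the overall time axis to full hours
    )
    # xlimsDct maps each SEGResIDBase to the (start, end) pairs of its detail plots
    return xlimsDct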
def plotDfAlarmStatistikReportsDruckErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,DruckResDct={}
,timeStart=None,timeEnd=None
,DruckErgsFile='DruckErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H'
    ,timeFloorCeilStrDetailPre='6T' # rounding before the alarm (6 minutes)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with RuheZeitenAlAnz>0
1 Base Plot for a Druck with an Alarm and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
logger.debug("{0:s}firstTime (ohne TimeShift): {1:s} lastTime (ohne TimeShift): {2:s}".format(logStr,str(firstTime),str(lastTime)))
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr) # https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
if timeShiftPair != None:
            (period,freq)=timeShiftPair
            timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
logger.debug("{0:s}timeStart abgerundet (ohne TimeShift): {1:s} timeEnd aufgerundet (ohne TimeShift): {2:s} TimeShift: {3:s}".format(logStr
,str(timeStart)
,str(timeEnd)
,str(timeDelta)))
xlimsDct={}
pdf=PdfPages(DruckErgsFile)
(fileNameBase,ext)= os.path.splitext(DruckErgsFile)
# über alle Segmente der Alarmstatistik (die DruckIDs sollen in der Reihenfolge der Alarmstatistik abgearbeitet werden)
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if row['RuheZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("LfdNr {:2d} - {:s}: {:s}: RuheZeitenAlAnz: 0".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']))
continue # keine SEGs ohne Alarme drucken
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
idxSEGPlotted=idxSEGPlotted+1
# DruckIDs eines Segmentes
DruckIDs=sorted([ID for ID in dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DruckResIDBase'].unique() if not pd.isnull(ID)])
for idxDruckID,DruckResIDBase in enumerate(DruckIDs):
dct=DruckResDct[DruckResIDBase]
if len(dct['Alarm'])==0:
# nur DruckIDs mit Alarmen plotten
continue
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
# Erg lesen
ResIDBase=DruckResIDBase
dfDruckReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='Druck',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
logger.debug("{:s}ResIDBase: {:s} dfDruckReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfDruckReprVec.columns.to_list()))
logger.debug("{:s}ID: {:s}: timeStart (mit TimeShift): {:s} timeEnd (mit TimeShift): {:s}".format(logStr
,DruckResIDBase
,str(dfDruckReprVec.index[0])
,str(dfDruckReprVec.index[-1])
))
ID='AL_S'
if ID not in dfDruckReprVec.keys():
continue
xlimsDct[ResIDBase]=[]
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten))
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,fOrteStripped('Druck',[DruckResIDBase])[0]
)
plt.savefig(fileName)
plt.show()
pdf.savefig(fig)
plt.close()
# Plot Alarme ###########################################################
dct=DruckResDct[DruckResIDBase]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['RuheZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d} Nr. {:4d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten)
,AlNr)
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileNameAlarm="{:s} Nr {:d}.png".format(fileName.replace('.png',''),AlNr)
plt.savefig(fileNameAlarm)
plt.show()
plt.close()
#plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
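# --- Illustrative usage (sketch, not part of the original module) -----------
# Shows how the returned xlimsDct is typically consumed: the detail time
# windows collected for one result ID are reused as sections for plotTimespans
# (defined below). The ResIDBase key and the data frames are assumptions;
# DINA4q/dpiSize are the module-level plot constants used above.
def _demoReuseXlimsDct(xlimsDct, ResIDBase, TCsLDSIn=pd.DataFrame(), dfDruckReprVec=pd.DataFrame()):
    fig = plt.figure(figsize=DINA4q, dpi=dpiSize)   # plotTimespans draws into plt.gcf()
    return plotTimespans(
        xlims=xlimsDct[ResIDBase]        # list of (timeStartDetail, timeEndDetail) pairs
        ,orientation='landscape'         # HYD on top, LDS below
        ,TCsLDSIn=TCsLDSIn
        ,dfDruckReprVec=dfDruckReprVec
    )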
def plotTimespans(
xlims # list of sections
,orientation='landscape' # oben HYD unten LDS; 'portrait': # links HYD rechts LDS
,pad=3.5 # tight_layout() can take keyword arguments of pad, w_pad and h_pad. These control the extra padding around the figure border and between subplots. The pads are specified in fraction of fontsize.
,w_pad=0.5
,h_pad=0.5
# 'portrait' # links HYD rechts LDS
,rectSpalteLinks=[0, 0, 0.5, 1]
,rectSpalteRechts=[0.325, 0, 1, 1]
# 'landscape': # oben HYD unten LDS
,rectZeileOben=[0, .5, 1, 1]
,rectZeileUnten=[0, 0, 1, .5]
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,figTitle='' #!
,figSave=False #!
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,sectionTitlesLDS=None # list of section titles to be used
,sectionTextsLDS=None # list of section texts to be used
,vLinesX=[] # plotted in each HYD section if X-time fits
,hLinesY=[] # plotted in each HYD section
,vAreasX=[] # for each HYD section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXLDS=None # plotted in each LDS section if X-time fits
,vAreasXLDS=None # for each LDS section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
,vLinesXColorLDS=None
,vAreasXColorLDS=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# --- Args Fct. HYD ---:
,TCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,TCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,TCsOPCScenTimeShift=pd.Timedelta('1 hour')
,TCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,TCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,TCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,TCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={}
,pDct={}
,QDctOPC={}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={}
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
# --- Args Fct. LDS ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
,ylimAL=ylimALD
,yticksAL=yticksALD
,ylimR=ylimRD #can be a list #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False # can be a list #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD # can be a list of lists #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
    # dito Beschl. (likewise for the acceleration axis)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
fig=plt.gcf()
if orientation=='landscape':
# oben HYD unten LDS
gsHYD = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.ncols)]
gsLDS = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.ncols)]
else:
# links HYD rechts LDS
gsHYD = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.nrows)]
gsLDS = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.nrows)]
pltLDSpQAndEventsResults=plotTimespansHYD(
axLst=axLstHYD
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitles
,sectionTexts=sectionTexts
,vLinesX=vLinesX
,hLinesY=hLinesY
,vAreasX=vAreasX
,vLinesXColor=vLinesXColor
,vAreasXColor=vAreasXColor
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfTCsLDSIn=TCsLDSIn
,dfTCsOPC=TCsOPC
,dfTCsOPCScenTimeShift=TCsOPCScenTimeShift
,dfTCsSIDEvents=TCsSIDEvents
,dfTCsSIDEventsTimeShift=TCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=TCsSIDEventsInXlimOnly
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
,yGridSteps=yGridSteps
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
)
if orientation=='landscape':
# oben HYD unten LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileOben)
else:
# links HYD rechts LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteLinks)
if sectionTitlesLDS==None:
sectionTitlesLDS=sectionTitles
if sectionTextsLDS==None:
sectionTextsLDS=sectionTexts
if vLinesXLDS==None:
vLinesXLDS=vLinesX
if vAreasXLDS==None:
vAreasXLDS=vAreasX
if vLinesXColorLDS==None:
vLinesXColorLDS=vLinesXColor
if vAreasXColorLDS==None:
vAreasXColorLDS=vAreasXColor
pltLDSErgVecResults=plotTimespansLDS(
axLst=axLstLDS
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitlesLDS
,sectionTexts=sectionTextsLDS
,vLinesX=vLinesXLDS
,vAreasX=vAreasXLDS
,vLinesXColor=vLinesXColorLDS
,vAreasXColor=vAreasXColorLDS
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,ylimR=ylimR
,ylimRxlim=ylimRxlim
,yticksR=yticksR
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
# wenn weniger als 5 Achsen geplottet werden stimmt der erste Wert von rectSpalteRechts nicht
#(axes,lines)=pltLDSErgVecResults[0]
#
# numOfYAxes=len(axes)
#corFac=5-numOfYAxes
#rectSpalteRechtsCor=rectSpalteRechts #[0.325, 0, 1, 1]
#rectSpalteRechtsCor[0]=rectSpalteRechtsCor[0]+0.06*corFac
if orientation=='landscape':
# oben HYD unten LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileUnten)
else:
# links HYD rechts LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteRechts)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults
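# --- Illustrative usage (sketch, not part of the original function) ---------
# A two-section portrait call with a highlighted time range; all time stamps,
# titles and data frames below are placeholders, not taken from a real run.
def _demoPlotTimespansPortrait(TCsLDSIn, dfSegReprVec, tA0, tA1, tB0, tB1):
    fig = plt.figure(figsize=DINA4q, dpi=dpiSize)
    return plotTimespans(
        xlims=[(tA0, tA1), (tB0, tB1)]
        ,orientation='portrait'                 # HYD left, LDS right
        ,sectionTitles=['Anfahren', 'Abfahren']
        ,vAreasX=[[(tA0, tA0 + pd.Timedelta('15 Minutes'))], []]  # highlight only in section 1
        ,TCsLDSIn=TCsLDSIn
        ,dfSegReprVec=dfSegReprVec
    )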
def plotTimespansHYD(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,hLinesY=[] # plotted in each section
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfTCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
    ,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={ # Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
    ,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSrc 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None#[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSpQAndEvents selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
# plots pltLDSpQAndEvents-Sections
# returns a Lst of pltLDSpQAndEvents-Results, a Lst of (axes,lines,scatters)
try:
if sectionTitles==[] or sectionTitles==None:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSpQAndEventsResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
(axes,lines,scatters)=pltLDSpQAndEvents(
ax
,dfTCsLDSIn=dfTCsLDSIn
,dfTCsOPC=dfTCsOPC
,dfTCsOPCScenTimeShift=dfTCsOPCScenTimeShift
,dfTCsSIDEvents=dfTCsSIDEvents
,dfTCsSIDEventsTimeShift=dfTCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=dfTCsSIDEventsInXlimOnly
,dfTCsSIDEventsyOffset=dfTCsSIDEventsyOffset
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,xlim=xlim
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
# 3. Achse
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
,yGridSteps=yGridSteps
,plotLegend=plotLegendFct
,baseColorsDef=baseColorsDef
)
pltLDSpQAndEventsResults.append((axes,lines,scatters))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
for hLineY in hLinesY:
ax.axhline(y=hLineY,xmin=0, xmax=1,color='gray',ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly:
legendHorizontalPos='center' # wenn nur 1x Legende dann Mitte
if plotLegend1stOnly and idx>0:
pass
else:
                    patterBCp='^p S(?:rc|nk)'
                    patterBCQ='^Q S(?:rc|nk)'
                    patterBCpQ='^[pQ] S(?:rc|nk)'
linesp=[line for line in lines if re.search(patterBCp,line) != None]
linesQ=[line for line in lines if re.search(patterBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patterBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSpQAndEventsResults
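# --- Illustrative usage (sketch, not part of the original function) ---------
# Driving plotTimespansHYD directly with manually created axes (this is what
# plotTimespans does internally); the number of sections and the data frames
# are assumptions.
def _demoPlotTimespansHYDDirect(xlims, dfTCsLDSIn, QDct, pDct):
    fig = plt.figure(figsize=DINA4q, dpi=dpiSize)
    gs = gridspec.GridSpec(1, len(xlims), figure=fig)
    axLst = [fig.add_subplot(gs[i]) for i in np.arange(gs.ncols)]
    results = plotTimespansHYD(
        axLst=axLst
        ,xlims=xlims
        ,dfTCsLDSIn=dfTCsLDSIn
        ,QDct=QDct
        ,pDct=pDct
    )
    gs.tight_layout(fig, pad=2.)
    return results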
def plotTimespansLDS(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
#,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD
,yTwinedAxesPosDeltaHPStart=-0.0125
,yTwinedAxesPosDeltaHP=-0.0875
,ylimR=ylimRD # can be a list
,ylimRxlim=False # can be a list
,yticksR=yticksRD # can be a list
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
# plots pltLDSErgVec-Sections
# returns a Lst of pltLDSErgVec-Results, a Lst of (axes,lines)
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if sectionTitles==[] or sectionTitles ==None:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSErgVecResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
ylimRIdx=ylimR
if isinstance(ylimR, list):
ylimRIdx=ylimR[idx]
ylimRxlimIdx=ylimRxlim
if isinstance(ylimRxlim, list):
ylimRxlimIdx=ylimRxlim[idx]
yticksRIdx=yticksR
if isinstance(yticksR, list):
if any(isinstance(el, list) for el in yticksR):
yticksRIdx=yticksR[idx]
(axes,lines)=pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,xlim=xlims[idx]
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,ylimAL=ylimAL
,yticksAL=yticksAL
,yTwinedAxesPosDeltaHPStart=yTwinedAxesPosDeltaHPStart
,yTwinedAxesPosDeltaHP=yTwinedAxesPosDeltaHP
,ylimR=ylimRIdx
,ylimRxlim=ylimRxlimIdx
,yticksR=yticksRIdx
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,ySpanMin=ySpanMin
,plotLegend=plotLegendFct
,legendLoc=legendLoc
,legendFramealpha=legendFramealpha
,legendFacecolor=legendFacecolor
,attrsDctLDS=attrsDctLDS
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,plotACCLimits=plotACCLimits
,highlightAreas=highlightAreas
,Seg_Highlight_Color=Seg_Highlight_Color
,Seg_Highlight_Alpha=Seg_Highlight_Alpha
,Seg_Highlight_Fct=Seg_Highlight_Fct
,Seg_HighlightError_Color=Seg_HighlightError_Color
,Seg_Highlight_Alpha_Error=Seg_Highlight_Alpha_Error #
,Seg_HighlightError_Fct=Seg_HighlightError_Fct
,Druck_Highlight_Color=Druck_Highlight_Color
,Druck_Highlight_Alpha=Druck_Highlight_Alpha
,Druck_Highlight_Fct=Druck_Highlight_Fct
,Druck_HighlightError_Color=Druck_HighlightError_Color
,Druck_Highlight_Alpha_Error=Druck_Highlight_Alpha_Error #
,Druck_HighlightError_Fct=Druck_HighlightError_Fct
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
pltLDSErgVecResults.append((axes,lines))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly and idx>0:
pass
else:
if not dfSegReprVec.empty:
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternSeg,line) != None])
,tuple([line for line in lines if re.search(patternSeg,line) != None])
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternDruck,line) != None])
,tuple([line for line in lines if re.search(patternDruck,line) != None])
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSErgVecResults
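# --- Illustrative usage (sketch, not part of the original function) ---------
# Per-section parameter variant for two sections: ylimR / ylimRxlim / yticksR
# may be passed as lists with one entry per xlim section. The limits below are
# placeholders.
def _demoPlotTimespansLDSPerSection(axLst, xlims, dfSegReprVec):
    return plotTimespansLDS(
        axLst=axLst
        ,xlims=xlims
        ,dfSegReprVec=dfSegReprVec
        ,ylimR=[(-5, 5), (-20, 20)]          # one residual-axis ylim per section
        ,ylimRxlim=[False, True]
        ,yticksR=[[-5, 0, 5], [-20, 0, 20]]  # list of lists: ticks per section
    )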
def pltLDSpQAndEvents(
ax
,dfTCsLDSIn # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorgenannten Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents= | pd.DataFrame() | pandas.DataFrame |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O
import seaborn as sns
import matplotlib.pyplot as plt
import gif
plt.style.use('fivethirtyeight')
#Data source from Kaggle: https://www.kaggle.com/jeanmidev/smart-meters-in-london
df=pd.read_csv('london_weather_hourly_darksky.csv')
#Renaming the time column
df=df.rename(columns={"time": "date"})
#Converting it to the appropriate date format
df['date']=pd.to_datetime(df['date'])
#Indexing by date and dropping the original column
df.set_index('date',drop=True, inplace=True)
# Resampling to monthly means
df=df.resample('M').mean()
END=df.index[-1]
START=df.index[0]
@gif.frame
def plot_split(df,date,split_date):
df=df.loc[df.index[0]: | pd.Timestamp(date) | pandas.Timestamp |
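# Sketch (assumption, not from the original script): how frames produced by the
# @gif.frame-decorated plot_split above are typically collected and written with
# the gif package. The monthly date step, duration and output name are
# illustrative only.
def make_weather_gif(df, start=START, end=END, out='london_weather.gif'):
    frames = [plot_split(df, date, split_date=end) for date in pd.date_range(start, end, freq='M')]
    gif.save(frames, out, duration=200)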
# -*- coding: utf-8 -*-
"""SonDenemeler.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19x1FeWR8BZ3sWyZqbRuUR8msEuL1RXzm
"""
from google.colab import drive
drive.mount("/content/drive")
"""# Model 1"""
from __future__ import print_function
import numpy as np # For numerical fast numerical calculations
import matplotlib.pyplot as plt # For making plots
import pandas as pd # Deals with data
import seaborn as sns # Makes beautiful plots
import keras
import sys
import pandas as pd
#from sklearn.preprocessing import CategoricalEncoder as ce #import category_encoders as ce
import datetime
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import os
import glob
import numpy as np
import scipy as sp
import pandas as pd
# skimage
from skimage.io import imshow, imread, imsave
from skimage.transform import rotate, AffineTransform, warp,rescale, resize, downscale_local_mean
from skimage import color,data
from skimage.exposure import adjust_gamma
from skimage.util import random_noise
# imgaug
import imageio
import imgaug as ia
import imgaug.augmenters as iaa
# Albumentations
import albumentations as A
# Keras
from keras.preprocessing.image import ImageDataGenerator,array_to_img, img_to_array, load_img
#visualisation
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#%matplotlib inline
import seaborn as sns
from IPython.display import HTML, Image
import cv2
import os
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
import seaborn as sns
# load data
#p_train=pd.read_csv('/content/drive/MyDrive/Plant_Pathology_2020/train.csv')
#p_test=pd.read_csv('/content/drive/MyDrive/Plant_Pathology_2020/test.csv')
import numpy as np # For numerical fast numerical calculations
#import matplotlib.pyplot as plt # For making plots
import pandas as pd # Deals with data
#import seaborn as sns # Makes beautiful plots
import keras
#import sys
#from pandas import pandas as pd
#from sklearn.preprocessing import CategoricalEncoder as ce #import category_encoders as ce
#import datetime
from keras.models import Sequential
#from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.preprocessing import image
#from keras.preprocessing.image import ImageDataGenerator
import keras.optimizers
from tensorflow.python.keras.optimizer_v2.adam import Adam
#from sklearn.svm import SVC
import os
import glob
#import numpy as np
#import scipy as sp
#import pandas as pd
# skimage
from skimage.io import imshow, imread, imsave
#from skimage.transform import rotate, AffineTransform, warp,rescale, resize, downscale_local_mean
#from skimage import color,data
#from skimage.exposure import adjust_gamma
#from skimage.util import random_noise
# imgaug
#import imageio
#import imgaug as ia
#import imgaug.augmenters as iaa
# Albumentations
#import albumentations as A
# Keras
from keras.preprocessing.image import ImageDataGenerator,array_to_img, img_to_array, load_img
#visualisation
import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#%matplotlib inline
#import seaborn as sns
#from IPython.display import HTML, Image
import cv2
p_train= | pd.read_csv('/content/drive/MyDrive/Plant_Pathology_2020/train.csv') | pandas.read_csv |
from data import *
import numpy as np
import pdb
import pandas as pd
def kppv(k,test,data_train,train_label):
    predict = [] # list of predictions
    for indx in test.index:
        test_line = test.loc[indx]
        neighbors = find_kppv_neighbors(k,test_line,data_train) # get the k nearest neighbours
        neighbors['label'] = train_label.loc[neighbors['index']].reset_index(drop = True) # get their labels
        labels_neighbors = pd.Series([0,0,0],index = [0,1,2],name = 'counter')
        labels_neighbors += neighbors['label'].value_counts()
        labels_neighbors = labels_neighbors.fillna(0).sort_values(ascending = False).reset_index(drop = False) # classes represented among the k nearest neighbours, sorted by count
        if labels_neighbors.loc[0,'counter'] == labels_neighbors.loc[1,'counter'] : # if there is no clear majority
            neighbors['weight'] = 1/neighbors['l1'] # weight each neighbour's class occurrence by the inverse of its distance
            final = neighbors[['weight','label']].groupby(['label']).sum().sort_values(by = 'weight', ascending = False) # classes sorted by the sum of 1/d
            predict.append(final.index[0]) # a tie is still possible here; one could pick at random or take the one with the smallest count
        else :
            predict.append(labels_neighbors.loc[0,'index'])
    return predict
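# Illustrative usage (sketch): classify the IRIS test set with k=5. The learn
# data/label constants are assumed to come from data.py; IRIS_LEARN_LABEL in
# particular is an assumed name chosen by analogy with IRIS_TEST_LABEL.
def demo_kppv(k=5):
    data_train = pd.DataFrame(IRIS_LEARN_DATA)
    train_label = pd.Series(IRIS_LEARN_LABEL)   # assumed constant, see note above
    data_test = pd.DataFrame(IRIS_TEST_DATA)
    return kppv(k, data_test, data_train, train_label)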
def metrics_kppv_k(k,return_pred = False):
    data_train = pd.DataFrame(IRIS_LEARN_DATA) # Data stored in another .py file; for csv use pd.read_csv, for .jpg use matplotlib.pyplot
data_test = pd.DataFrame(IRIS_TEST_DATA)
label_test = | pd.Series(IRIS_TEST_LABEL) | pandas.Series |
import pandas as pd
period = pd.Period('2020-06', freq='M')
print(period)
print(period.asfreq('D', 'start'))
print(period.asfreq('D', 'end'))
# Can perform period arithmetic - increment month
print(period + 1)
# Can create period range per month in a year
monthly_period_range = pd.period_range('2020-01', '2021-12', freq='M')
print(monthly_period_range)
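# Extra illustration (not in the original script): a PeriodIndex can be mapped
# back to Timestamps at the start or end of each period
print(monthly_period_range.to_timestamp(how='start')[:3])
print(monthly_period_range.to_timestamp(how='end')[:3])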
# Creating a Period covering all years in a range
year_period_range = | pd.period_range('2015', '2021', freq='A-DEC') | pandas.period_range |
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionLogicalXorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_logical_xor_scalar(self):
self.assertEqual(dnp.logical_xor(1.2 + 1j, 1.2 - 1j), np.logical_xor(1.2 + 1j, 1.2 - 1j))
self.assertEqual(dnp.logical_xor(0.5, 9), np.logical_xor(0.5, 9))
self.assertEqual(dnp.logical_xor(-1, 8.5), np.logical_xor(-1, 8.5))
self.assertEqual(dnp.logical_xor(1, 4), np.logical_xor(1, 4))
self.assertEqual(dnp.logical_xor(1, -5), np.logical_xor(1, -5))
self.assertEqual(dnp.logical_xor(0, 9), np.logical_xor(0, 9))
self.assertEqual(dnp.logical_xor(dnp.nan, -5), np.logical_xor(dnp.nan, -5))
def test_function_math_binary_logical_xor_list(self):
lst1 = [0, 1, 2]
lst2 = [4, 6, 9]
assert_array_equal(dnp.logical_xor(lst1, lst2), np.logical_xor(lst1, lst2))
def test_function_math_binary_logical_xor_array_with_scalar(self):
npa = np.array([0, 1, 2])
dnpa = dnp.array([0, 1, 2])
assert_array_equal(dnp.logical_xor(dnpa, 1), np.logical_xor(npa, 1))
assert_array_equal(dnp.logical_xor(dnpa, dnp.nan), np.logical_xor(npa, np.nan))
assert_array_equal(dnp.logical_xor(1, dnpa), np.logical_xor(1, npa))
def test_function_math_binary_logical_xor_array_with_array(self):
npa1 = np.array([0, 1, 2])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([0, 1, 2])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.logical_xor(dnpa1, dnpa2), np.logical_xor(npa1, npa2))
def test_function_math_binary_logical_xor_array_with_array_param_out(self):
npa1 = np.array([0, 1, 2])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([0, 1, 2])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.logical_xor(npa1, npa2, out=npa)
dnp.logical_xor(dnpa1, dnpa2, out=dnpa)
assert_array_equal(dnpa, npa)
def test_function_math_binary_logical_xor_array_with_series(self):
npa = np.array([0, 1, 2])
dnpa = dnp.array([0, 1, 2])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.logical_xor(dnpa, os).to_pandas(), np.logical_xor(npa, ps))
assert_series_equal(dnp.logical_xor(os, dnpa).to_pandas(), np.logical_xor(ps, npa))
pser = | pd.Series([1, 2, 4]) | pandas.Series |
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, List, Union
from collections import OrderedDict
from pathos.multiprocessing import ThreadPool as Pool
from tqdm import tqdm
from src.utils import remap_label, get_type_instances
from .metrics import PQ, AJI, AJI_plus, DICE2, split_and_merge
class Benchmarker:
def compute_metrics(
self,
true_pred: List[np.ndarray]
) -> Dict[str, float]:
"""
Computes metrics for one (inst_map, gt_mask) pair.
If GT does not contain any nuclear objects, returns None
Args:
-----------
true_pred (List[np.ndarray]):
Ground truth annotations in true_pred[1] and
corresponding predicted instance map in true_pred[2]
Returns:
-----------
A Dict[str, float] of the metrics
"""
name = true_pred[0]
true = true_pred[1]
pred = true_pred[2]
# Skip empty GTs
if len(np.unique(true)) > 1:
true = remap_label(true)
pred = remap_label(pred)
pq = PQ(true, pred)
aji = AJI(true, pred)
aji_p = AJI_plus(true, pred)
dice2 = DICE2(true, pred)
splits, merges = split_and_merge(true, pred)
result = {
"name":name,
"AJI": aji,
"AJI_plus": aji_p,
"DICE2": dice2,
"PQ": pq["pq"],
"SQ": pq["sq"],
"DQ": pq["dq"],
"inst_recall": pq["recall"],
"inst_precision": pq["precision"],
"splits": splits,
"merges": merges
}
return result
def benchmark_insts(
self,
inst_maps: Dict[str, np.ndarray],
gt_masks: Dict[str, np.ndarray],
pattern_list: List[str]=None,
save_dir: Union[str, Path]=None,
prefix: str=""
) -> pd.DataFrame:
"""
Run benchmarking metrics for instance maps for all of the files
in the dataset. Note that the inst_maps and gt_masks need to
share exact same keys and be sorted so that they align when
computing metrics.
Args:
-----------
inst_maps (OrderedDict[str, np.ndarray]):
A dict of file_name:inst_map key vals in order
gt_masks (OrderedDict[str, np.ndarray]):
A dict of file_name:gt_inst_map key vals in order
pattern_list (List[str], default=None):
A list of patterns contained in the gt_mask and inst_map
names. Averages for the masks containing these patterns
will be added to the result df.
save_dir (str or Path):
directory where to save the result .csv
prefix (str, default=""):
adds a prefix to the .csv file name
Returns:
----------
a pandas dataframe of the metrics. Samples are rows and
metrics are columns:
_____________________
|sample|PQ|SQ|DQ|AJI|
|img1 |.5|.4|.6|.6 |
|img2 |.5|.4|.6|.6 |
"""
assert isinstance(inst_maps, dict), (
f"inst_maps: {type(inst_maps)} is not a dict of inst_maps"
)
assert isinstance(gt_masks, dict), (
f"inst_maps: {type(gt_masks)} is not a dict of inst_maps"
)
# Sort by file name
inst_maps = OrderedDict(sorted(inst_maps.items()))
gt_masks = OrderedDict(sorted(gt_masks.items()))
assert inst_maps.keys() == gt_masks.keys(), (
"inst_maps have different names as gt masks. insts: ",
f"{inst_maps.keys()}. gt's: {gt_masks.keys()}"
)
masks = list(
zip(inst_maps.keys(), gt_masks.values(), inst_maps.values())
)
metrics = []
with Pool() as pool:
for x in tqdm(
pool.imap_unordered(self.compute_metrics, masks),
total=len(masks),
desc="Runnning metrics"
):
metrics.append(x)
# drop Nones if no nuclei are found in an image
metrics = [metric for metric in metrics if metric]
score_df = pd.DataFrame.from_records(metrics)
score_df = score_df.set_index("name").sort_index()
score_df.loc["averages_for_the_set"] = score_df.mean(axis=0)
# Add averages to the df of files which contain patterns
if pattern_list is not None:
pattern_avgs = {
f"{p}_avg": score_df[score_df.index.str.contains(f"{p}")].mean(axis=0)
for p in pattern_list
}
score_df = pd.concat(
[score_df, pd.DataFrame(pattern_avgs).transpose()]
)
# Save results to .csv
if save_dir is not None:
save_dir = Path(save_dir)
score_df.to_csv(Path(save_dir / f"{prefix}_inst_benchmark.csv"))
return score_df
def benchmark_per_type(
self,
inst_maps: Dict[str, np.ndarray],
type_maps: Dict[str, np.ndarray],
gt_mask_insts: Dict[str, np.ndarray],
gt_mask_types: Dict[str, np.ndarray],
classes: Dict[str, int],
pattern_list: List[str]=None,
save_dir: Union[str, Path]=None,
prefix: str=""
) -> pd.DataFrame:
"""
Run benchmarking metrics per class type for all of the files in
the dataset. Note that the inst_maps and gt_masks need to share
exact same keys and be sorted so that they align when computing
metrics.
Args:
-----------
inst_maps (Dict[str, np.ndarray]):
A dict of file_name:inst_map key vals in order
type_maps (Dict[str, np.ndarray]):
A dict of file_name:panoptic_map key vals in order
gt_masks_insts (Dict[str, np.ndarray]):
A dict of file_name:gt_inst_map key vals in order
gt_masks_types (Dict[str, np.ndarray]):
A dict of file_name:gt_panoptic_map key vals in order
classes (Dict[str, int]):
The class dict e.g. {bg: 0, immune: 1, epithel: 2}.
background must be 0 class
pattern_list (List[str], default=None):
A list of patterns contained in the gt_mask and inst_map
names. Averages for the masks containing these patterns
will be added to the result df.
save_dir (str or Path):
directory where to save the result .csv
prefix (str, default=""):
adds a prefix to the .csv file name
Returns:
-----------
a pandas dataframe of the metrics. Samples are rows and
metrics are columns:
__________________________
|sample |PQ|SQ|DQ|AJI|
|img1_type1 |.5|.4|.6|.6 |
|img1_type2 |.5|.4|.6|.6 |
|img2_type1 |.5|.4|.6|.6 |
|img2_type2 |.5|.4|.6|.6 |
"""
assert isinstance(inst_maps, dict), (
f"inst_maps: {type(inst_maps)} is not a dict of inst_maps"
)
assert isinstance(type_maps, dict), (
f"inst_maps: {type(type_maps)} is not a dict of panoptic_maps"
)
assert isinstance(gt_mask_insts, dict), (
f"inst_maps: {type(gt_mask_insts)} is not a dict of inst_maps"
)
assert isinstance(gt_mask_types, dict), (
f"inst_maps: {type(gt_mask_types)} is not a dict of inst_maps"
)
# sort by name
inst_maps = OrderedDict(sorted(inst_maps.items()))
type_maps = OrderedDict(sorted(type_maps.items()))
gt_mask_insts = OrderedDict(sorted(gt_mask_insts.items()))
gt_mask_types = OrderedDict(sorted(gt_mask_types.items()))
assert inst_maps.keys() == gt_mask_insts.keys(), (
"inst_maps have different names as gt masks. insts: ",
f"{inst_maps.keys()}. gt's: {gt_mask_insts.keys()}"
)
# Loop masks per class
df_total = pd.DataFrame()
for c, ix in list(classes.items())[1:]: # skip bg
gts_per_class = [
get_type_instances(i, t, ix)
for i, t in zip(gt_mask_insts.values(), gt_mask_types.values())
]
insts_per_class = [
get_type_instances(i, t, ix)
for i, t in zip(inst_maps.values(), type_maps.values())
]
masks = list(zip(inst_maps.keys(), gts_per_class, insts_per_class))
metrics = []
with Pool() as pool:
for x in tqdm(
pool.imap_unordered(self.compute_metrics, masks),
total=len(masks), desc=f"Running metrics for {c}"
):
metrics.append(x)
# drop Nones if no classes are found in an image
metrics = [metric for metric in metrics if metric]
score_df = | pd.DataFrame.from_records(metrics) | pandas.DataFrame.from_records |
import sys
import os
import libsbml
from tqdm import tqdm
import pandas as pd
from itertools import product
from bioservices.kegg import KEGG
from requests.exceptions import HTTPError, RequestException
import helper_functions as hf
'''
Usage: check+annotate_metabolites.py <path_input_sbml-file> <outfile-mismatches-csv> <outfile-formula-search-csv> [-chBal]
-chBal : tolerate charge/hydrogen balancing, i.e. a charge difference of +1 should correspond to +1 H atom
Takes formulas from the notes field and the fbc plugin; if none are found, the BiGG DB is searched for a formula.
If BiGG offers multiple or no possibilities, a csv-formatted table with these metabolites is returned.
Only gathers information; the model itself is not changed.
'''
def main(args):
# console access
    if len(args) < 4:
print(main.__doc__)
sys.exit(1)
infile = args[1]
outfile_mismatches = args[2]
outfile_formula_search = args[3]
tolerate_ch_h_bal = "-chBal" in args
if not os.path.exists(infile):
print("[Error] %s : No such file." % infile)
sys.exit(1)
# create Readers and Writers
reader = libsbml.SBMLReader()
# Read SBML File
doc = reader.readSBML(infile)
model = doc.getModel()
# Knowledge base preparation
# bigg_db = pd.read_csv("Databases/BiGG/bigg_models_metabolites.tsv", sep='\t')
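# header=351 presumably points read_csv at the column-header row that follows
# MetaNetX's long commented preamble in chem_prop.tsv (an assumption about the file layout).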
mnx_db = pd.read_csv("Databases/MetaNetX/chem_prop.tsv", header=351, sep='\t')
mnx_db.rename(columns={'#ID': 'id'}, inplace=True)
mnx_db.fillna("", inplace=True)
seed_db = pd.read_csv("Databases/SEED/compounds.tsv", header=0, sep="\t")
"""This module performs estimation of image overlap or shift
using phase correlation from OpenCV.
It contains class AdaptiveShiftEstimation
that can handle manual and automatically scanned image data sets.
"""
from copy import deepcopy
from itertools import chain
from typing import List, Tuple, Union
import cv2 as cv
import numpy as np
import pandas as pd
from .my_types import Image, DF
class AdaptiveShiftEstimation:
def __init__(self):
self._scan = ''
self._micro_ids = None
self._micro_x_size = None
self._micro_y_size = None
self._ids_in_clusters = []
self._default_image_shape = (0, 0)
def estimate(self, images: List[Image]) -> Union[Tuple[DF, DF, DF], Tuple[list, list, list]]:
self._default_image_shape = images[0].shape
if self._scan == 'auto':
ids, x_size, y_size = self.estimate_image_sizes_scan_auto(images)
return ids, x_size, y_size
elif self._scan == 'manual':
x_size, y_size = self.estimate_image_sizes_scan_manual(images)
ids = pd.DataFrame(self._micro_ids)
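# Cast tile ids to int where possible; non-numeric ids are left untouched.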
for j in ids.columns:
for i in ids.index:
try:
val = ids.loc[i, j]
val = int(val)
ids.loc[i, j] = val
except ValueError:
pass
return pd.DataFrame(self._micro_ids)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 01 00:39:44 2018
@author: punck
"""
import numpy as np
import scipy.stats as ss
import pandas as pd
class Generator:
"""A random dataset generator class"""
def Binomial(self, n, p, size):
"""
Dataset of binomial random variables with n trials and success probability p
n = number of trials
p = probability of success
size = output shape
"""
if not isinstance(size, tuple):
size = (size, 1)
y = np.random.binomial(n, p, size)
columns = []
length = size[1]
for i in range(length):
columns.append('x{}'.format(i+1))
df = pd.DataFrame(y, columns=columns)
return df
def Bernoulli(self, p, size):
"""
Dataset of Bernoulli random variables with success probability p and output shape size
p = probability of success
size = output shape
"""
if not isinstance(size, tuple):
size = (size, 1)
y = np.random.binomial(1, p, size)
columns = []
length = size[1]
for i in range(length):
columns.append('x{}'.format(i+1))
df = pd.DataFrame(y, columns=columns)
return df
def Normal(self, mu, sigma, size):
"""
Dataset of normal random variables with mean mu and standard deviation sigma
mu = mean
sigma = standard deviation
size = output shape
"""
if not isinstance(size, tuple):
size = (size, 1)
y = np.random.normal(mu, sigma, size)
columns = []
length = size[1]
for i in range(length):
columns.append('x{}'.format(i+1))
df = pd.DataFrame(y, columns=columns)
return df
def Uniform(self,low, high, size):
"""
[low, high)
"""
if not isinstance(size, tuple):
size = (size, 1)
y = np.random.rand(*size) * (high - low) + low
columns = []
length = size[1]
for i in range(length):
columns.append('x{}'.format(i+1))
df = pd.DataFrame(y, columns=columns)
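# Illustrative usage sketch (not part of the original module):
#   gen = Generator()
#   normal_df = gen.Normal(0.0, 1.0, (100, 3))   # 100x3 DataFrame with columns x1..x3
#   bern_df = gen.Bernoulli(0.3, 50)             # single-column DataFrame of 0/1 draws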
import os
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, accuracy_score
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets
from tqdm import tqdm
from models import BadNet
from utils import print_model_perform
def loss_picker(loss):
if loss == 'mse':
criterion = nn.MSELoss()
elif loss == 'cross':
criterion = nn.CrossEntropyLoss()
else:
print("automatically assign mse loss function to you...")
criterion = nn.MSELoss()
return criterion
def optimizer_picker(optimization, param, lr):
if optimization == 'adam':
optimizer = optim.Adam(param, lr=lr)
elif optimization == 'sgd':
optimizer = optim.SGD(param, lr=lr)
else:
print("automatically assign adam optimization function to you...")
optimizer = optim.Adam(param, lr=lr)
return optimizer
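# Illustrative usage sketch ("model" below is a placeholder, not defined in this file):
#   criterion = loss_picker('cross')                                # nn.CrossEntropyLoss
#   optimizer = optimizer_picker('sgd', model.parameters(), lr=0.01)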
def backdoor_model_trainer(dataname, train_data_loader, test_data_ori_loader, test_data_tri_loader, trigger_label, epoch, batch_size, loss_mode, optimization, lr, print_perform_every_epoch, basic_model_path, device):
badnet = BadNet(input_channels=train_data_loader.dataset.channels, output_num=train_data_loader.dataset.class_num).to(device)
criterion = loss_picker(loss_mode)
optimizer = optimizer_picker(optimization, badnet.parameters(), lr=lr)
train_process = []
print("### target label is %d, EPOCH is %d, Learning Rate is %f" % (trigger_label, epoch, lr))
print("### Train set size is %d, ori test set size is %d, tri test set size is %d\n" % (len(train_data_loader.dataset), len(test_data_ori_loader.dataset), len(test_data_tri_loader.dataset)))
for epo in range(epoch):
loss = train(badnet, train_data_loader, criterion, optimizer, loss_mode)
acc_train = eval(badnet, train_data_loader, batch_size=batch_size, mode='backdoor', print_perform=print_perform_every_epoch)
acc_test_ori = eval(badnet, test_data_ori_loader, batch_size=batch_size, mode='backdoor', print_perform=print_perform_every_epoch)
acc_test_tri = eval(badnet, test_data_tri_loader, batch_size=batch_size, mode='backdoor', print_perform=print_perform_every_epoch)
print("# EPOCH%d loss: %.4f training acc: %.4f, ori testing acc: %.4f, trigger testing acc: %.4f\n"\
% (epo, loss.item(), acc_train, acc_test_ori, acc_test_tri))
# save model
torch.save(badnet.state_dict(), basic_model_path)
# save training progress
train_process.append(( dataname, batch_size, trigger_label, lr, epo, loss.item(), acc_train, acc_test_ori, acc_test_tri))
df = pd.DataFrame(train_process, columns=("dataname", "batch_size", "trigger_label", "learning_rate", "epoch", "loss", "train_acc", "test_ori_acc", "test_tri_acc"))
import re
import time
import math
import sys
import os
import psutil
from abc import ABCMeta, abstractmethod
from pathlib import Path
from contextlib import contextmanager
import pandas as pd
import numpy as np
def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
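# Illustrative usage sketch (the frame below is hypothetical):
#   df = pd.DataFrame({"a": np.arange(1000, dtype=np.int64), "b": np.random.rand(1000)})
#   df = reduce_mem_usage(df)   # "a" is downcast to int16, "b" to float32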
@contextmanager
def timer(name):
t0 = time.time()
print(f'[{name}] start')
yield
print(f'[{name}] done in {time.time() - t0:.0f} s')
class Feature(metaclass=ABCMeta):
prefix = ''
suffix = ''
dir = '.'
def __init__(self):
self.name = self.__class__.__name__
self.train = pd.DataFrame()
self.test = pd.DataFrame()
self.train_path = Path(self.dir) / f'{self.name}_train.ftr'
self.test_path = Path(self.dir) / f'{self.name}_test.ftr'
def run(self):
with timer(self.name):
self.create_features()
prefix = self.prefix + '_' if self.prefix else ''
suffix = '_' + self.suffix if self.suffix else ''
self.train.columns = prefix + self.train.columns + suffix
self.test.columns = prefix + self.test.columns + suffix
return self
@abstractmethod
def create_features(self):
raise NotImplementedError
def save(self):
self.train.to_feather(str(self.train_path))
self.test.to_feather(str(self.test_path))
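# Illustrative sketch of a concrete Feature subclass (column names and the
# train_df/test_df frames are hypothetical, not defined in this module):
#   class FamilySize(Feature):
#       def create_features(self):
#           self.train['family_size'] = train_df['SibSp'] + train_df['Parch']
#           self.test['family_size'] = test_df['SibSp'] + test_df['Parch']
#   FamilySize().run().save()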
def load_datasets(feats, fdir):
dfs = [pd.read_feather(f'{fdir}/{f}_train.ftr') for f in feats]
X_train = pd.concat(dfs, axis=1)
import numpy as np
import pandas as pd
from main.data import SETTINGS, IN_PAPER_NAMES
from framework.util import get_average_result_from_df, save_tsv, no_zeros_formatter, load_tsv
import datetime
import scipy.stats as st
directions=['be2vad', 'vad2be']
models=['baseline', 'reference_LM', 'Reference_KNN', 'my_model']
df=pd.DataFrame(index=[setting.name for setting in SETTINGS],
columns=['be2vad_'+model for model in models]+\
['vad2be_'+model for model in models])
for d in directions:
for setting in SETTINGS:
for model in models:
perf=get_average_result_from_df('results/{}/{}/{}.tsv'.format(
d, setting.name, model))
df.loc[setting.name, d+'_'+model]=perf
df.loc['Average']=df.mean(axis=0)
df.rename(index=IN_PAPER_NAMES, inplace=True)
save_tsv(df, 'overview.tsv', dec=3)
string = df.to_latex(float_format=no_zeros_formatter)
lines=string.split('\n')
lines[0]='\\begin{tabular}{|l|rrrr|rrrr|}'
lines=['%%%%%% Automatic Python output from {} &%%%%%%%%%%'.format(datetime.datetime.now())]+lines
lines[-1]='%%%%%%%%%%%%%%%%%%%%%%%%'
lines.insert(3, '{} & \multicolumn{4}{c|}{BE2VAD} & \multicolumn{4}{c|}{VAD2BE} \\\\')
lines[4]=lines[4].replace('be2vad\_','').replace('vad2be\_', '').\
replace('Reference\_','').replace('reference\_','').replace('my\_model','FFNN').\
replace('baseline','Baseline')
lines[2]='\\hline'
lines[5]='\\hline\\hline'
lines[-3]='\\hline'
lines.insert(-4, '\\hline')
string='\n'.join(lines)
print(string)
with open('overview.tex', 'w') as f:
print(string, file=f)
####################################################
### Significance tests
settings=[s.name for s in SETTINGS]
star_df = pd.DataFrame(columns=directions)
# -*- coding: utf-8 -*-
# Imports
import random
import pandas as pd
import numpy as np
from pyclustering.cluster.kmedoids import kmedoids
from pyclustering.cluster import cluster_visualizer
from pyclustering.utils.metric import distance_metric, type_metric
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min, silhouette_score
from .plot_utils import tsne_plot, embedding_plot, scatter_overlay
# Init schemes
class Init:
"""Class represents different initialization schemes.
Methods for selecting initial points on a user defined grid.
"""
def __init__(self, method, batch_size, distance='gower'):
"""
Parameters
----------
method : str
Sampling method. Options include: 'random', 'PAM', 'k-means', and
'external'.
batch_size : int
Number of points to select.
distance : str
Distance metric to be used with PAM. Options include: 'gower',
'euclidean', and 'euclidean_square'.
"""
self.method = method
self.batch_size = batch_size
self.distance_metric = distance
def run(self, obj, seed=None, export_path=None, visualize=False):
"""Run initialization algorithm on user defined domain.
Parameters
----------
obj : edbo.objective
Objective data container.
seed : None, int
Random seed for random selection and initial choice of medoids
or centroids.
export_path : None, str
Path to export visualization if applicable.
visualize : bool
If initialization method is set to 'pam' or 'kmeans' and visualize
is set to True then a 2D embedding of the clustering results will
be generated.
Returns
----------
pandas.DataFrame
Selected domain points.
"""
if 'rand' in self.method.lower():
self.experiments = rand(obj, self.batch_size, seed=seed)
elif self.method.lower() == 'pam' or 'medoids' in self.method.lower():
self.experiments = PAM(obj,
self.batch_size,
distance=self.distance_metric,
visualize=visualize,
seed=seed,
export_path=export_path)
elif 'means' in self.method.lower():
self.experiments = k_means(obj,
self.batch_size,
visualize=visualize,
seed=seed,
export_path=export_path)
elif 'ext' in self.method.lower():
self.experiments = external_data(obj)
else:
print('edbo bot: Specify a valid initialization method.')
return self.experiments
def plot_choices(self, obj, export_path=None):
"""Plot low dimensional embeddingd of initialization points in domain.
Parameters
----------
obj : edbo.objective
Objective data container.
export_path : None, str
Path to export visualization if applicable.
Returns
----------
pandas.DataFrame
Selected domain points.
"""
X = pd.concat([obj.domain.drop(self.experiments.index.values, axis=0), self.experiments])
domain = ['Domain' for i in range(len(obj.domain.drop(self.experiments.index.values, axis=0)))]
init = ['Initialization' for i in range(len(self.experiments))]
labels = domain + init
if len(X.iloc[0]) > 2:
tsne_plot(X,
y=labels,
label='Key',
colors='hls',
legend='full',
export_path=export_path)
elif len(X.iloc[0]) == 2:
scatter_overlay(X,
y=labels,
label='Key',
colors='hls',
legend='full',
export_path=export_path)
elif len(X.iloc[0]) == 1:
rep = pd.DataFrame()
rep[X.columns.values[0]] = X.iloc[:,0]
rep[' '] = [0 for i in range(len(X))]
scatter_overlay(rep,
y=labels,
label='Key',
colors='hls',
legend='full',
export_path=export_path)
# Random selection of domain points
def rand(obj, batch_size, seed=None):
"""Random selection of points.
Parameters
----------
obj : edbo.objective
Objective data container.
batch_size : int
Number of points to be selected.
seed : None, int
Random seed.
Returns
----------
pandas.DataFrame
Selected domain points.
"""
batch = obj.domain.sample(
n=batch_size,
random_state=seed)
return batch
# External data
def external_data(obj):
"""External data reader.
Parameters
----------
obj : edbo.objective
Objective data container.
Returns
----------
pandas.DataFrame
Selected domain points.
"""
print('\nUsing external results for initialization...\n')
return obj.results.drop(obj.target, axis=1)
def PAM(obj, batch_size, distance='gower', visualize=True,
seed=None, export_path=None):
"""Partitioning around medoids algorithm.
PAM function returns medoids of learned clusters.
PAM implemented using pyclustering: https://pypi.org/project/pyclustering/
Parameters
----------
obj : edbo.objective
Objective data container.
batch_size : int
Number of points to be selected. Batch size also determines the number
of clusters. PAM returns the medoids.
distance : str
Distance metric to be used in the PAM algorithm. Options include:
'gower', 'euclidean', and 'euclidean_square'.
visualize : bool
Visualize the learned clusters.
seed : None, int
Random seed.
export_path : None, str
Path to export cluster visualization SVG image.
Returns
----------
pandas.DataFrame
Selected domain points.
"""
# print('\nInitializing using PAM...\n')
# Set random initial medoids
if type(seed) == type(1):
random.seed(a=seed)
initial_medoids = random.sample(range(len(obj.domain)), batch_size)
# Load list of points for cluster analysis
sample = obj.domain.values.tolist()
# Create instance of K-Medoids algorithm
if distance == 'gower':
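# Gower distance scales each descriptor by its range across the domain, so
# mixed-unit features contribute comparably; max_range supplies those ranges.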
max_range = (obj.domain.max() - obj.domain.min()).values
metric = distance_metric(type_metric.GOWER, max_range=max_range)
elif distance == 'euclidean':
metric = distance_metric(type_metric.EUCLIDEAN)
elif distance == 'euclidean_square':
metric = distance_metric(type_metric.EUCLIDEAN_SQUARE)
kmedoids_instance = kmedoids(sample, initial_medoids, metric=metric, tolerance=0.0001, itermax=300)
# Run cluster analysis and obtain results
kmedoids_instance.process()
medoids = kmedoids_instance.get_medoids()
medoids = obj.domain.iloc[medoids]
# Display clusters
if visualize == True:
# Get clusters
clusters = kmedoids_instance.get_clusters()
# If low d use built in visualization
if len(sample[0]) < 4:
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.show()
else:
columns = obj.domain.columns.values.tolist()
columns.append('label')
tsne_data = pd.DataFrame(columns=columns)
for i in range(len(clusters)):
data = obj.domain.iloc[clusters[i]].values.tolist()
data = pd.DataFrame(data=data, columns=columns[:-1])
data['label'] = [i] * len(clusters[i])
tsne_data = pd.concat([tsne_data,data])
embedding_plot(
tsne_data.drop('label',axis=1),
labels=tsne_data['label'].values.tolist(),
export_path=export_path
)
return medoids
def k_means(obj, batch_size, visualize=True, seed=None, export_path=None,
n_init=1, return_clusters=False, return_centroids=False):
"""K-Means algorithm.
k_means function returns domain points closest to the means of learned clusters.
k-means clustering implemented using scikit-learn: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
Parameters
----------
obj : edbo.objective
Objective data container.
batch_size : int
Number of points to be selected. Batch size also determines the minimum
number of clusters considered; the points closest to the cluster means are returned.
visualize : bool
Visualize the learned clusters.
seed : None, int
Random seed.
export_path : None, str
Path to export cluster visualization SVG image.
Returns
----------
pandas.DataFrame
Selected domain points.
"""
# Run cluster analysis and choose best via silhouette
cluster_sizes = [n for n in range(batch_size, batch_size+10)]
scores = []
for n_clusters in cluster_sizes:
clusterer = KMeans(n_clusters=n_clusters, random_state=seed, n_init=n_init)
cluster_labels = clusterer.fit_predict(obj.domain)
silhouette_avg = silhouette_score(obj.domain, cluster_labels)
scores.append(silhouette_avg)
best = cluster_sizes[np.argmax(scores)]
print('edbo bot: ' + str(best) + ' clusters selected by silhouette score...')
# Refit with best value
clusterer = KMeans(n_clusters=best, random_state=seed, n_init=n_init)
cluster_labels = clusterer.fit_predict(obj.domain)
# Get points closes to the cluster means
closest = pd.DataFrame(columns=obj.domain.columns)
for i in range(best):
cluster_i = obj.domain.iloc[np.where(clusterer.labels_ == i)]
closest_i, _ = pairwise_distances_argmin_min(clusterer.cluster_centers_[[i]], cluster_i)
closest = pd.concat([closest, cluster_i.iloc[closest_i]], sort=False)
# Author: <NAME>
# tomoyuki (at) genemagic.com
import sys
import argparse
import csv
import time
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--source', type=str, help="CSV data url or file.")
parser.add_argument('--fast', type=int, default=12, help="Fast length.")
parser.add_argument('--slow', type=int, default=26, help="Slow length.")
parser.add_argument('--signal', type=int, default=9, help="Signal length.")
parser.add_argument('--ma1', type=int, default=5, help="MA1 length.")
parser.add_argument('--ma2', type=int, default=25, help="MA2 length.")
parser.add_argument('--ma3', type=int, default=75, help="MA3 length.")
parser.add_argument('--bbp', type=int, default=20, help="BB period.")
parser.add_argument('--title', type=str, default="COVID19 Trend analysis.", help="Graph title.")
args = parser.parse_args()
return args
def process(args):
if args.source is None:
df = pd.read_csv(sys.stdin)
else:
df = pd.read_csv(args.source)
df['公表_年月日'] = pd.to_datetime(df['公表_年月日'], format="%Y-%m-%d")
"""Module to provide generic utilities for other accelerometer modules."""
from collections import OrderedDict
import datetime
import glob
import json
import math
import os
import pandas as pd
import re
DAYS = ['mon', 'tue', 'wed', 'thur', 'fri', 'sat', 'sun']
TIME_SERIES_COL = 'time'
def formatNum(num, decimalPlaces):
"""return str of number formatted to number of decimalPlaces
When writing out 10,000's of files, it is useful to format the output to n
decimal places as a space saving measure.
:param float num: Float number to be formatted.
:param int decimalPlaces: Number of decimal places for output format
:return: Number formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.formatNum(2.567, 2)
2.57
"""
fmt = '%.' + str(decimalPlaces) + 'f'
return float(fmt % num)
def meanSDstr(mean, std, numDecimalPlaces):
"""return str of mean and stdev numbers formatted to number of decimalPlaces
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int numDecimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanSDstr(2.567, 0.089, 2)
2.57 (0.09)
"""
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(std, numDecimalPlaces))
outStr += ')'
return outStr
def meanCIstr(mean, std, n, numDecimalPlaces):
"""return str of mean and 95% confidence interval numbers formatted
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int n: Number of observations
:param int decimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanCIstr(2.567, 0.089, 100, 2)
2.57 (2.55 - 2.58)
"""
stdErr = std / math.sqrt(n)
lowerCI = mean - 1.96 * stdErr
upperCI = mean + 1.96 * stdErr
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(lowerCI, numDecimalPlaces))
outStr += ' - '
outStr += str(formatNum(upperCI, numDecimalPlaces))
outStr += ')'
return outStr
def toScreen(msg):
"""Print msg str prepended with current time
:param str mgs: Message to be printed to screen
:return: Print msg str prepended with current time
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.toScreen("hello")
2018-11-28 10:53:18 hello
"""
timeFormat = '%Y-%m-%d %H:%M:%S'
print(f"\n{datetime.datetime.now().strftime(timeFormat)}\t{msg}")
def writeStudyAccProcessCmds(accDir, outDir, cmdsFile='processCmds.txt',
accExt="cwa", cmdOptions=None, filesCSV="files.csv"):
"""Read files to process and write out list of processing commands
This creates the following output directory structure containing all
processing results:
<outDir>/
summary/ #to store outputSummary.json
epoch/ #to store feature output for 30sec windows
timeSeries/ #simple csv time series output (VMag, activity binary predictions)
nonWear/ #bouts of nonwear episodes
stationary/ #temp store for features of stationary data for calibration
clusterLogs/ #to store terminal output for each processed file
If a filesCSV exists in accDir/, process the files listed there. If not,
all files in accDir/ are processed
Then an acc processing command is written for each file and written to cmdsFile
:param str accDirs: Directory(s) with accelerometer files to process
:param str outDir: Output directory to be created containing the processing results
:param str cmdsFile: Output .txt file listing all processing commands
:param str accExt: Acc file type e.g. cwa, CWA, bin, BIN, gt3x...
:param str cmdOptions: String of processing options e.g. "--epochPeriod 10"
Type 'python3 accProccess.py -h' for full list of options
:param str filesCSV: Name of .csv file listing acc files to process
:return: New file written to <cmdsFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.writeStudyAccProcessingCmds("myAccDir/", "myResults/", "myProcessCmds.txt")
<cmd options written to "myProcessCmds.txt">
"""
# Create output directory structure
summaryDir = os.path.join(outDir, 'summary')
epochDir = os.path.join(outDir, 'epoch')
timeSeriesDir = os.path.join(outDir, 'timeSeries')
nonWearDir = os.path.join(outDir, 'nonWear')
stationaryDir = os.path.join(outDir, 'stationary')
logsDir = os.path.join(outDir, 'clusterLogs')
rawDir = os.path.join(outDir, 'raw')
npyDir = os.path.join(outDir, 'npy')
createDirIfNotExists(summaryDir)
createDirIfNotExists(epochDir)
createDirIfNotExists(timeSeriesDir)
createDirIfNotExists(nonWearDir)
createDirIfNotExists(stationaryDir)
createDirIfNotExists(logsDir)
createDirIfNotExists(rawDir)
createDirIfNotExists(npyDir)
createDirIfNotExists(outDir)
# Use filesCSV if provided, else process everything in accDir (and create filesCSV)
if filesCSV in os.listdir(accDir):
fileList = pd.read_csv(os.path.join(accDir, filesCSV))
else:
fileList = pd.DataFrame(
{'fileName': [f for f in os.listdir(accDir) if f.endswith(accExt)]}
)
fileList.to_csv(os.path.join(accDir, filesCSV), index=False)
with open(cmdsFile, 'w') as f:
for i, row in fileList.iterrows():
cmd = [
'accProcess "{:s}"'.format(os.path.join(accDir, row['fileName'])),
'--summaryFolder "{:s}"'.format(summaryDir),
'--epochFolder "{:s}"'.format(epochDir),
'--timeSeriesFolder "{:s}"'.format(timeSeriesDir),
'--nonWearFolder "{:s}"'.format(nonWearDir),
'--stationaryFolder "{:s}"'.format(stationaryDir),
'--rawFolder "{:s}"'.format(rawDir),
'--npyFolder "{:s}"'.format(npyDir),
'--outputFolder "{:s}"'.format(outDir)
]
# Grab additional arguments provided in filesCSV (e.g. calibration params)
cmdOptionsCSV = ' '.join(['--{} {}'.format(col, row[col]) for col in fileList.columns[1:]])
if cmdOptions:
cmd.append(cmdOptions)
if cmdOptionsCSV:
cmd.append(cmdOptionsCSV)
cmd = ' '.join(cmd)
f.write(cmd)
f.write('\n')
print('Processing list written to ', cmdsFile)
print('Suggested dir for log files: ', logsDir)
def collateJSONfilesToSingleCSV(inputJsonDir, outputCsvFile):
"""read all summary *.json files and convert into one large CSV file
Each json file represents summary data for one participant. Therefore output
CSV file contains summary for all participants.
:param str inputJsonDir: Directory containing JSON files
:param str outputCsvFile: Output CSV filename
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.collateJSONfilesToSingleCSV("data/", "data/summary-all-files.csv")
<summary CSV of all participants/files written to "data/summary-all-files.csv">
"""
# First combine into <tmpJsonFile> the processed outputs from <inputJsonDir>
tmpJsonFile = outputCsvFile.replace('.csv', '-tmp.json')
count = 0
with open(tmpJsonFile, 'w') as fSummary:
for fStr in glob.glob(inputJsonDir + "*.json"):
if fStr == tmpJsonFile:
continue
with open(fStr) as f:
if count == 0:
fSummary.write('[')
else:
fSummary.write(',')
fSummary.write(f.read().rstrip())
count += 1
fSummary.write(']')
# Convert temporary json file into csv file
summaryDict = json.load(open(tmpJsonFile, "r"), object_pairs_hook=OrderedDict)  # read json
df = pd.DataFrame.from_dict(summaryDict)  # create pandas object from json dict
refColumnItem = next((item for item in summaryDict if item['quality-goodWearTime'] == 1), None)
dAcc = df[list(refColumnItem.keys())] # maintain intended column ordering
# infer participant ID
dAcc['eid'] = dAcc['file-name'].str.split('/').str[-1].str.replace('.CWA', '.cwa').str.replace('.cwa', '')
dAcc.to_csv(outputCsvFile, index=False)
# remove tmpJsonFile
os.remove(tmpJsonFile)
print('Summary of', str(len(dAcc)), 'participants written to:', outputCsvFile)
def identifyUnprocessedFiles(filesCsv, summaryCsv, outputFilesCsv):
"""identify files that have not been processed
Look through all processed accelerometer files, and find participants who do
not have records in the summary csv file. This indicates there was a problem
in processing their data. Therefore, output will be a new .csv file to
support reprocessing of these files
:param str filesCsv: CSV listing acc files in study directory
:param str summaryCsv: Summary CSV of processed dataset
:param str outputFilesCsv: Output csv listing files to be reprocessed
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.identifyUnprocessedFiles("study/files.csv", study/summary-all-files.csv",
"study/files-reprocess.csv")
<Output csv listing files to be reprocessed written to "study/files-reprocess.csv">
"""
fileList = pd.read_csv(filesCsv)
summary = pd.read_csv(summaryCsv)
import pandas as pd
import pytest
from woodwork.logical_types import Datetime, Double, Integer, NaturalLanguage
from featuretools.entityset import EntitySet
from featuretools.tests.testing_utils import get_df_tags
from featuretools.utils.gen_utils import Library, import_or_none
from featuretools.utils.koalas_utils import pd_to_ks_clean
ks = import_or_none('databricks.koalas')
@pytest.mark.skipif('not ks')
def test_add_dataframe_from_ks_df(pd_es):
cleaned_df = pd_to_ks_clean(pd_es["log"])
log_ks = ks.from_pandas(cleaned_df)
ks_es = EntitySet(id="ks_es")
ks_es = ks_es.add_dataframe(
dataframe_name="log_ks",
dataframe=log_ks,
index="id",
time_index="datetime",
logical_types=pd_es["log"].ww.logical_types,
semantic_tags=get_df_tags(pd_es["log"])
)
pd.testing.assert_frame_equal(cleaned_df, ks_es["log_ks"].to_pandas(), check_like=True)
@pytest.mark.skipif('not ks')
def test_add_dataframe_with_non_numeric_index(pd_es, ks_es):
df = pd.DataFrame({"id": pd.Series(["A_1", "A_2", "C", "D"], dtype='string'),
"values": [1, 12, -34, 27]})
ks_df = ks.from_pandas(df)
pd_es.add_dataframe(
dataframe_name="new_dataframe",
dataframe=df,
index="id",
logical_types={"id": NaturalLanguage, "values": Integer})
ks_es.add_dataframe(
dataframe_name="new_dataframe",
dataframe=ks_df,
index="id",
logical_types={"id": NaturalLanguage, "values": Integer})
pd.testing.assert_frame_equal(pd_es['new_dataframe'].reset_index(drop=True), ks_es['new_dataframe'].to_pandas())
@pytest.mark.skipif('not ks')
def test_create_entityset_with_mixed_dataframe_types(pd_es, ks_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
ks_df = ks.from_pandas(df)
err_msg = "All dataframes must be of the same type. " \
"Cannot add dataframe of type {} to an entityset with existing dataframes " \
"of type {}"
# Test error is raised when trying to add Koalas dataframe to entityset with existing pandas dataframes
with pytest.raises(ValueError, match=err_msg.format(type(ks_df), type(pd_es.dataframes[0]))):
pd_es.add_dataframe(
dataframe_name="new_dataframe",
dataframe=ks_df,
index="id")
# Test error is raised when trying to add pandas dataframe to entityset with existing ks dataframes
with pytest.raises(ValueError, match=err_msg.format(type(df), type(ks_es.dataframes[0]))):
ks_es.add_dataframe(
dataframe_name="new_dataframe",
dataframe=df,
index="id")
@pytest.mark.skipif('not ks')
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
ks_es = EntitySet(id="ks_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [ | pd.to_datetime('2019-01-10') | pandas.to_datetime |
'''
Author:<NAME>
<EMAIL>'''
# Import required libraries
import pathlib
import dash
import numpy as np
from dash.dependencies import Input, Output, State, ClientsideFunction
import dash_core_components as dcc
import dash_html_components as html
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
from dateutil.relativedelta import *
from datetime import datetime
from controls import TYPE_COLORS,PORTS_COLORS,FLEET
from choropleth_map_emission import choropleth_map, sum_by_hexagon
##DataFrames
from data_filtering import processed_data
import pandas as pd
import geopandas as gpd
import os
import requests
##Databases
##Databases
panama_ports=gpd.read_file("data/Panama_ports.geojson")
canal,ports=processed_data(FLEET)
gatun=pd.read_csv("data/draught_restr_data.csv")
em=pd.read_csv("data/emissions_type_monthly.csv")
em["dt_pos_utc"]=pd.to_datetime(em["dt_pos_utc"])
pol=gpd.read_file("data/Panama_Canal.geojson")[["Name","geometry"]]
pol=pol[pol.geometry.apply(lambda x: x.geom_type=="Polygon")]
##Transform to datetime. Preferred to read csv method which is less flexible.
canal["time_at_entrance"]=pd.to_datetime(canal["time_at_entrance"])
ports["initial_service"]=pd.to_datetime(ports["initial_service"])
gatun["Date"]=pd.to_datetime(gatun["Date"])
##Ports color
panama_ports=panama_ports.assign(color="#F9A054")
# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
app = dash.Dash(
__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
app.title = 'Panama Maritime Stats'
server = app.server
# Create global chart template
MAPBOX_TOKEN = os.environ.get('MAPBOX_TOKEN', None)
layout_map = dict(
autosize=True,
paper_bgcolor='#30333D',
plot_bgcolor='#30333D',
margin=dict(l=10, r=10, b=10, t=10),
hovermode="closest",
font=dict(family="HelveticaNeue",size=17,color="#B2B2B2"),
legend=dict(font=dict(size=10), orientation="h"),
mapbox=dict(
accesstoken=MAPBOX_TOKEN,
style='mapbox://styles/gabrielfuenmar/ckhs87tuj2rd41amvifhb26ad',
center=dict(lon=-79.55, lat=8.93),
zoom=9,
),
showlegend=False,
)
layout= dict(
legend=dict(bgcolor='rgba(0,0,0,0)',font=dict(size=14,family="HelveticaNeue")),
font_family="HelveticaNeue",
font_color="#B2B2B2",
title_font_family="HelveticaNeue",
title_font_color="#B2B2B2",
title_font_size=20,
paper_bgcolor='#21252C',
plot_bgcolor='#21252C',
xaxis=dict(gridcolor="rgba(178, 178, 178, 0.1)",title_font_size=15,
tickfont_size=14,title_font_family="HelveticaNeue",tickfont_family="HelveticaNeue"),
yaxis=dict(gridcolor="rgba(178, 178, 178, 0.1)",title_font_size=15,tickfont_size=14,
title_font_family="HelveticaNeue",tickfont_family="HelveticaNeue")
)
##Modebar on graphs
config={"displaylogo":False, 'modeBarButtonsToRemove': ['autoScale2d']}
##Annotation on graphs
annotation_layout=dict(
xref="x domain",
yref="y domain",
x=0.25,
y=-0.35)
# Create app layout
app.layout = html.Div(
[
dcc.Store(id="aggregate_data"),
# empty Div to trigger javascript file for graph resizing
html.Div(id="output-clientside"),
html.Div(
[
html.Div(
[
html.A(html.Img(
src=app.get_asset_url("mtcc_logo_v3.png"),
id="plotly-image",
style={
"height": "160px",
"width": "auto",
"margin-bottom": "0px",
"text-align": "center"
},
),
href="https://mtcclatinamerica.com/")
],
className="one-half column",
),
html.Div(
[
html.Div(
[
html.H3(
"Panama Maritime Statistics",
style={"margin-bottom": "0px"},
),
html.H5(
"Efficiency and Sustainability Indicators", style={"margin-top": "0px"}
),
]
)
],
className="one-half column",
id="title",
),
html.Div(
[
html.Button("Refresh", id="refresh-button"),
html.A(
html.Button("Developer", id="home-button"),
href="https://gabrielfuentes.org",
)
],
className="one-third column",
id="button",
style={
"text-align": "center"},
),
],
id="header",
className="row flex-display",
style={"margin-bottom": "15px"},
),
html.Div(
[
html.Div(
[
html.P("Date Filter",
className="control_label",
),
html.Div([html.P(id="date_from"),
html.P(id="date_to")],className="datecontainer")
,
dcc.RangeSlider(
id="year_slider",
min=0,
max=20,
value=[0, 20],
marks={
0:"Dec 2018",
5:"May 2019",
10:"Oct 2019",
15:"Mar 2020",
20:"Aug 2020"},
allowCross=False,
className="dcc_control",
),
html.P("Vessel Type:", className="control_label"),
dcc.Dropdown(
id='types-dropdown',
options=[{'label': row,'value': row} \
for row in sorted(FLEET)],
placeholder="All",multi=True,
className="dcc_control"),
html.P("Port:", className="control_label"),
dcc.Dropdown(
id='ports-dropdown',
options=[{'label': row,'value': row} \
for row in sorted(ports[~ports.port_name.isin(["Pacific - PATSA","Colon2000"])]\
.dropna(subset=["port_name"]).port_name.unique())+["Panama Canal South", "Panama Canal North"]],
placeholder="All",multi=True,
className="dcc_control"),
html.P(
"Vessel Size (GT)",
className="control_label",
),
html.Div([html.P(id="size_from"),
html.P(id="size_to")],className="datecontainer"),
dcc.RangeSlider(
id="size_slider",
min=400,
max=170000,
value=[400, 170000],
step=8500,
marks={
400:"400",
35000:"35k",
70000:"70k",
105000:"105k",
140000:"140k",
170000:"170k"},
allowCross=False,
className="dcc_control",
),
],
className="pretty_container four columns",
id="cross-filter-options",
),
html.Div(
[
html.Div(
[
html.Div(
[html.H6(id="waitingText"), html.P("Waiting Average")],
id="waiting",
className="mini_container",
),
html.Div(
[html.H6(id="opsText"), html.P("Operations")],
id="ops",
className="mini_container",
),
html.Div(
[html.H6(id="serviceText"), html.P("Service Average")],
id="service_m",
className="mini_container",
),
html.Div(#####Hardcoded for the time being. Build a scraper.
[html.H6(["15.24 m"],id="draughtText"), html.P("Canal Max Draught TFW")],
id="draught",
className="mini_container",
),
],
id="info-container",
className="row container-display",
),
html.Div([
html.Div(
[
html.Div([html.H5("Emissions Review"),
html.H6(id="month_map",style={"color":"white"})],
style={"display": "flex", "flex-direction": "row","justify-content":"space-between"}),
dcc.Graph(animate=False,config=config,id="map_in"),
html.P(["Grid size"],id="grid_size",className="control_label"),
dcc.Slider(
id="zoom_slider",
min=4,
max=8,
value=8,
marks={
4:{'label': '1'},5:{'label': '2'},6:{'label': '3'},
7:{'label': '4'},8:{'label': '5'}},
className="dcc_control",
included=False),
dcc.RadioItems(
id='selector',options=[{'label': "CO2 emissions", 'value': "co2"},
{'label': "CH4 emissions", 'value': "ch4"}],
value="co2",labelStyle={'display': 'inline-block'}),
],
id="emissionsMapContainer",
className="pretty_container eight columns",
)
],
className="row flex-display",
),
],
id="right-column",
className="eight columns",
),
],
className="row flex-display",
),
html.Div(
[
html.Div(
[dcc.Graph(id="service_graph",config=config)],
className="pretty_container six columns",
),
html.Div(
[dcc.Graph(id="waiting_graph",config=config)],
className="pretty_container six columns",
)
],
className="row flex-display",
),
html.Div(
[
html.Div(
[dcc.Graph(id="draught_graph",config=config)],
className="pretty_container six columns",
),
html.Div(
[dcc.Graph(id="ratio_graph",config=config)],
className="pretty_container six columns",
),
],
className="row flex-display",
),
],
id="mainContainer",
style={"display": "flex", "flex-direction": "column"},
)
def upper_text_p1(fr="01-01-2019",to="18-11-2020",ports_sel=["All"],
type_vessel=["All"],size=["All"],text_bar=True,*args):
date_from=pd.to_datetime(fr)
date_to=pd.to_datetime(to)
canal_in=canal[(canal.time_at_entrance.between(date_from,date_to))&(canal.direct_transit_boolean==True)].\
copy()
ports_in=ports[ports.initial_service.between(date_from,date_to)].\
copy()
canal_in=canal_in.assign(day=canal_in.time_at_entrance.dt.date)
canal_in=canal_in[["day","waiting_time","service_time","port_name","draught_ratio","StandardVesselType","GT"]]
canal_in["day"]=pd.to_datetime(canal_in.day)
ports_in=ports_in.assign(day=ports_in.initial_service.dt.date)
ports_in=ports_in[["day","waiting_time","service_time","port_name","draught_ratio","StandardVesselType","GT"]]
ports_in["day"]=pd.to_datetime(ports_in.day)
df_in=pd.concat([ports_in,canal_in],axis=0)
if "All" not in ports_sel:
df_in=df_in[df_in.port_name.isin(ports_sel)]
if "All" not in size:
df_in=df_in[df_in.GT.between(size[0],size[1])]
if "All" not in type_vessel:
df_in=df_in[df_in["StandardVesselType"].isin(type_vessel)]
if text_bar is True: ##Row at top with summary values
waiting_mean=df_in.waiting_time.mean()
ops=df_in.shape[0]
service_mean=df_in.service_time.mean()
return waiting_mean,ops,service_mean
else: ###Graphs on waiting, service time and draught ratio
##Fig ratio
df_in=df_in[df_in.day>pd.to_datetime("01-01-2019")]
df_in=df_in.reset_index(drop=True)
series_grouped=[]
for name,row in df_in.\
groupby([df_in.day.dt.isocalendar().week,df_in.day.dt.year,"StandardVesselType"]):
series_grouped.append([pd.to_datetime(str(name[1])+"-"+str(name[0])+"-1",format='%Y-%W-%w'),name[2],row.draught_ratio.mean()])
series_grouped=pd.DataFrame(series_grouped,columns=["day","StandardVesselType","draught_ratio"]).sort_values(by=["day"])
draught_fig = go.Figure()
for val in series_grouped["StandardVesselType"].unique():
series_in=series_grouped[series_grouped["StandardVesselType"]==val]
draught_fig.add_trace(go.Scatter(
name=val,
mode="markers+lines",
x=series_in.day,y=series_in.draught_ratio,
line=dict(shape="spline", width=1, color=TYPE_COLORS[val]),
marker=dict(symbol="diamond-open")))
draught_fig.update_layout(layout,legend=dict(x=1),title_text="<b>Draught Ratio per vessel type</b>",
xaxis=dict(title_text="Date"),yaxis=dict(title_text="Ratio"),)
draught_fig.add_annotation(annotation_layout,text="*AIS draft/min(maxTFWD, max Allowable draft)")
##Service and waiting time
labels_w=[]
remove_w=[]
waiting=[]
for name,row in df_in.groupby("port_name"):
if len(row.waiting_time.dropna().tolist())>25:
labels_w.append(name)
wa_li=row.waiting_time[(row.waiting_time>1)&(row.waiting_time<row.waiting_time.quantile(0.95))&\
(row.waiting_time>row.waiting_time.quantile(0.05))]
waiting.append(wa_li.dropna().tolist())
else:
remove_w.append(name)
labels_s=[]
remove_s=[]
service=[]
for name,row in df_in.groupby("port_name"):
if len(row.service_time.dropna().tolist())>25:
labels_s.append(name)
se_li=row.service_time[(row.service_time>0)&(row.service_time<row.service_time.quantile(0.95))&\
(row.service_time>row.service_time.quantile(0.05))]
service.append(se_li.dropna().tolist())
else:
remove_s.append(name)
##Figs of waiting and service time
if len(labels_w)>0:
fig_waiting = ff.create_distplot(waiting, labels_w,histnorm="probability density",colors=list(PORTS_COLORS.values()),show_rug=False,show_curve=False)
else:
fig_waiting=go.Figure()
fig_waiting.add_annotation(x=2,y=5,xref="x",yref="y",text="max=5",showarrow=True,
font=dict(family="Courier New, monospace",size=16, color="#ffffff"),align="center",
arrowhead=2, arrowsize=1, arrowwidth=2,arrowcolor="#636363", ax=20,ay=-30,bordercolor="#c7c7c7",
borderwidth=2,borderpad=4,bgcolor="#ff7f0e",opacity=0.8)
if len(labels_s)>0:
fig_service = ff.create_distplot(service, labels_s,histnorm="probability density",colors=list(PORTS_COLORS.values()),show_rug=False,show_curve=False)
else:
fig_service=go.Figure()
fig_service.add_annotation(x=2,y=5,xref="x",yref="y",text="max=5",showarrow=True,
font=dict(family="Courier New, monospace",size=16, color="#ffffff"),align="center",
arrowhead=2, arrowsize=1, arrowwidth=2,arrowcolor="#636363", ax=20,ay=-30,bordercolor="#c7c7c7",
borderwidth=2,borderpad=4,bgcolor="#ff7f0e",opacity=0.8)
###Service and Waiting Graphs Layout
fig_waiting.update_layout(layout,yaxis=dict(zeroline=True,linecolor='white',title_text="Density"),
xaxis=dict(title_text="Hours"),
legend=dict(x=0.6),title_text="<b>Waiting Time</b>")
fig_waiting.add_annotation(annotation_layout,text="*Results from inbuilt method by Fuentes, Sanchez-Galan and Diaz")
fig_waiting.update_traces(marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
fig_service.update_layout(layout,yaxis=dict(zeroline=True,linecolor="white",title_text="Density"),
xaxis=dict(title_text="Hours"),
legend=dict(x=0.6),title_text="<b>Service Time</b>")
fig_service.add_annotation(annotation_layout,text="*Results from inbuilt method by Fuentes, Sanchez-Galan and Diaz")
fig_service.update_traces(marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
return fig_waiting,fig_service,draught_fig
def lake_draught(fr="01-01-2015",to="18-11-2020",*args):
gatun_in=gatun.copy()
date_from=pd.to_datetime(fr)
date_to=pd.to_datetime(to)
gatun_in=gatun_in[gatun_in.Date.between(date_from,date_to)]
gatun_in=gatun_in.assign(day=gatun_in.Date.dt.day.astype(str)+"/"+gatun_in.Date.dt.month.astype(str)+"/"+gatun_in.Date.dt.year.astype(str))
lake_fig=make_subplots(specs=[[{"secondary_y": True}]])
lake_fig.add_trace(go.Scatter(
name="Gatun Lake Depth",
mode="lines",
x=gatun_in.day,y=gatun_in.gatun_depth,
line=dict(shape="spline", width=2,color="#6671FD")),secondary_y=True)
lake_fig.add_trace(go.Scatter(
name="Draught Change",
mode="lines",
x=gatun_in[gatun_in.Change.notnull()]["day"],y=gatun_in[gatun_in.Change.notnull()]["Change"],
line=dict(shape="spline", width=2,color="#3ACC95"),
marker=dict(symbol="diamond-open")),secondary_y=False)
lake_fig.add_trace(go.Scatter(
name="Max draught",
mode="lines",
x=gatun_in.day,y=gatun_in.Overall,
line=dict(shape="spline", width=2,color="#F9A054")),secondary_y=False)
##Layout update
lake_fig.update_layout(layout,title_text="<b>Gatun Lake and Draught Restriction Relation</b>",
xaxis=dict(title_text="Date",nticks=6),
legend=dict(x=0.6,y=1))
# Set y-axes titles
lake_fig.update_yaxes(title_text="Max Draught (m)", secondary_y=False,showgrid=False,
range=[gatun_in.Overall.min()*0.99,gatun_in.Overall.max()*1.05])
lake_fig.update_yaxes(title_text="Lake Depth (m)", secondary_y=True,gridcolor="rgba(178, 178, 178, 0.1)",
title_font_size=15,tickfont_size=14,
title_font_family="HelveticaNeue",tickfont_family="HelveticaNeue",
range=[gatun_in.gatun_depth.min()*0.99,gatun_in.gatun_depth.max()*1.05])
lake_fig.add_annotation(annotation_layout,text="*Values sourced by the Panama Canal Authority Maritime Services Platform")
return lake_fig
def emissions_map(ghg,res,fr="01-01-2018",to="30-08-2020",lat=None,lon=None,zoom=None,type_vessel=[],size=[]):
emissions_in=em.copy()
date_fr=pd.to_datetime(fr)
date_to=pd.to_datetime(to)
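# Aggregate emissions onto a hexagonal grid (resolution driven by the "Grid size"
# slider) for the selected date window, vessel types and size range.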
df_aggreg=sum_by_hexagon(emissions_in,res,pol,date_fr,date_to,vessel_type=type_vessel,gt=size)
##Update layout
if lat is not None:
layout_map["mapbox"]["center"]["lon"]=lon
layout_map["mapbox"]["center"]["lat"]=lat
layout_map["mapbox"]["zoom"]=zoom
if df_aggreg.shape[0]>0:
heatmap=choropleth_map(ghg,df_aggreg,layout_map)
else:
heatmap=go.Figure(data=go.Scattermapbox(lat=[0],lon=[0]),layout=layout_map)
return heatmap
##Upper Row,
@app.callback(
[
Output("waitingText", "children"),
Output("opsText", "children"),
Output("serviceText", "children"),
Output("date_from","children"),
Output("date_to","children"),
Output("size_from","children"),
Output("size_to","children"),
],
[Input("ports-dropdown", "value"),
Input("types-dropdown","value"),
Input('year_slider', 'value'),
Input('size_slider', 'value'),
],
)
def update_row1(ports_val,types_val,date,size_val):
if not ports_val:
ports_val=["All"]
if not types_val:
types_val=["All"]
date_fr=pd.to_datetime("12-01-2018 00:00")+relativedelta(months=+date[0])
date_to=pd.to_datetime("12-01-2018 00:00")
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and reverse_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method excutes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method excutes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice, once with the attempt of
``table_data`` and ``column`` and second with ``column_data`` as input.
Output:
- Return a list of ``True`` values with the same length as ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
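# Editorial note (sketch, not part of the original suite): the three "column"
# tests above all exercise the dispatch that ``CustomConstraint._run*`` is
# assumed to follow: call the user function with ``(table_data, column)``
# first and, if that raises ``TypeError``, retry with ``table_data[column]``.
# A minimal illustration of that fallback, where ``func`` stands for any
# hypothetical user-supplied callable:
#
#     try:
#         result = func(table_data, 'a')
#     except TypeError:
#         result = func(table_data['a'])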
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
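# Editorial note (an assumption drawn from the asserts above): ``fit`` picks
# ``'#'`` as the separator, builds the joint column name by joining the
# constraint columns with it ('b' + '#' + 'c' == 'b#c') and stores the unique
# combinations of those columns in ``instance._combinations``.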
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
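# Editorial note (sketch): ``transform`` replaces the constraint columns with a
# single joint column (here 'b#c') whose values parse as UUIDs, keeping the
# ``_combinations_to_uuids`` and ``_uuids_to_combinations`` mappings so that
# ``reverse_transform`` can restore the original columns later.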
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
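# Editorial usage sketch (not part of the original tests): the round trip that
# the transform/reverse_transform tests above exercise, wrapped in a helper.
# It only uses the public calls already shown in this suite (``fit``,
# ``transform`` and ``reverse_transform``); the helper name is hypothetical.
def _unique_combinations_round_trip_example():
    """Fit, transform and reverse a small frame with UniqueCombinations."""
    table_data = pd.DataFrame({
        'a': ['a', 'b', 'c'],
        'b': ['d', 'e', 'f'],
        'c': ['g', 'h', 'i'],
    })
    instance = UniqueCombinations(columns=['b', 'c'])
    instance.fit(table_data)
    transformed = instance.transform(table_data)  # 'b' and 'c' become one UUID column 'b#c'
    return instance.reverse_transform(transformed)  # original 'b' and 'c' restored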
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is equal to ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is equal to ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method passes and formats the inputs correctly
when ``drop`` is different from ``scalar``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
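# Editorial summary (based on the tests above, not on the implementation):
# ``_validate_inputs`` normalises its arguments as follows: the column side is
# wrapped in a list ('a' becomes ['a']), the scalar side is left untouched and
# ``constraint_columns`` collects every column name. It raises ``ValueError``
# when both sides list more than one column, when ``scalar`` is an unknown
# keyword, or when ``drop`` equals ``scalar``.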
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
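# Editorial note: as the two tests above show, the comparison operator is
# derived from ``strict``. A sketch of the assumed selection logic:
#
#     operator = np.greater if strict else np.greater_equal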
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or an ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or an ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
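# Editorial note (assumption drawn from the two tests above): ``_get_value``
# returns ``table_data[columns].values`` (an ndarray) when the requested field
# refers to column names, and simply echoes the stored scalar (here 3) when
# that field was declared as the scalar side.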
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be equal to the given column names with
a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be equal to the given column names with
a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should combine the names of the two columns
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should combine the names of the two columns
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should combine the names of the two columns
with a '#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
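# Editorial summary of the naming scheme exercised above (inferred from the
# expected values, not from the implementation): when one side is a scalar,
# each constraint column gets a '#' token appended ('a' becomes 'a#', 'b#'
# becomes 'b##'); when both sides are columns, the two names are joined with
# the token in between ('b' + '#' + 'a' gives 'b#a').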
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
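# Editorial summary (grounded in the five ``_fit`` tests above): the column to
# reconstruct is the high column by default and when ``scalar == 'low'``, the
# low column when ``drop == 'low'`` or ``scalar == 'high'``, and the high
# column when ``drop == 'high'``.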
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ``['a#']``
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
- ``_diff_columns`` == ``['b#a']``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in those columns should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in those columns should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in those columns should all be higher than
the values in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
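# Editorial note on the expected arithmetic (a sketch consistent with the
# docstring above, not a quote of the implementation): the diff column holds
# np.log(high - low + 1), so with b - a == 3 on every row each entry is
#
#     np.log(4 - 1 + 1) == np.log(4)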
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
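# Editorial note: for datetime columns the one-second distance is first
# expressed in nanoseconds (1 second == 1_000_000_000 ns), hence the expected
# diff value of np.log(1_000_000_000 + 1) == np.log(1_000_000_001) above.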
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance + 1 for each of the given columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance + 1 for each of the given columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance + 1 for each of the given columns.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
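# Note (added for clarity): np.log(1_000_000_001) encodes a one-second gap for datetime
# columns -- the difference is taken in nanoseconds (1e9 ns) and 1 is added before the
# logarithm, mirroring the numeric log(diff + 1) transform used above.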
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(5).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/+4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(-2).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
- Same table with replaced rows and dropped columns.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop is None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method when ``drop`` is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == 0
- instance._high == ['a']
- instance._strict == True
- instance._scalar == 'low'
- instance._drop == 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
Input:
- strict = True
- columns = 'a'
- drop = False
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop is None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method when ``drop`` is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
Input:
- strict = True
- columns = 'a'
- drop = True
Side effects:
- instance._low == ['a']
- instance._high == 0
- instance._strict == True
- instance._scalar == 'high'
- instance._drop == 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
If `drop_column` is false, expect to not drop the constraint column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
| pd.testing.assert_frame_equal(expected_out, out) | pandas.testing.assert_frame_equal |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from typing import List, Optional
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
import torcharrow.pytorch as tap
from torcharrow._interop import (
from_arrow_table,
from_arrow_array,
from_pandas_dataframe,
from_pandas_series,
)
from torcharrow.scope import Scope
# replicated here since we don't expose it from interop.py
# TO DELETE: New logic, mask illegal data...
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
class TestLegacyInterop(unittest.TestCase):
def setUp(self):
self.ts = Scope({"device": "demo"})
def test_numpy_numerics_no_mask(self):
# numerics...
for np_type, ta_type in zip(
[np.int8, np.int16, np.int32, np.int64, np.float32, np.float64],
[dt.int8, dt.int16, dt.int32, dt.int64, dt.Float32(True), dt.Float64(True)],
):
self.assertEqual(dt.typeof_np_dtype(np_type), ta_type)
arr = np.ones((20,), dtype=np_type)
# type preserving
self.assertEqual(dt.typeof_np_dtype(arr.dtype), ta_type)
col = self.ts._FullColumn(arr, dtype=ta_type)
self.assertTrue(col.valid(1))
arr[1] = 99
self.assertEqual(arr[1], 99)
self.assertEqual(col[1], 99)
def test_numpy_numerics_with_mask(self):
for np_type, ta_type in zip(
[np.int8, np.int16, np.int32, np.int64, np.float32, np.float64],
[dt.int8, dt.int16, dt.int32, dt.int64, dt.Float32(True), dt.Float64(True)],
):
data = np.ones((20,), dtype=np_type)
mask = np.full((len(data),), False, dtype=np.bool_)  # np.bool8 alias is deprecated in recent NumPy
mask[1] = True
arr = ma.array(data, mask=mask)
col = self.ts._FullColumn(data, dtype=ta_type, mask=mask)
# all defined, except...
self.assertFalse(col.valid(1))
self.assertTrue(col.valid(2))
data[1] = 99
self.assertTrue(ma.is_masked(arr[1]))
self.assertEqual(col[1], None)
def test_strings_no_mask(self):
# dt.strings (with np.str_ representation)
arr = np.array(["a", "b", "cde"], dtype=np.str_)
self.assertEqual(dt.typeof_np_dtype(arr.dtype), dt.string)
col = self.ts._FullColumn(arr, dtype=dt.string)
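# Note (added for clarity): the np.str_ array above has a fixed itemsize of three
# characters ('<U3'), so the assignment below silently truncates "kkkk" to "kkk",
# which is exactly what the following assertions check.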
arr[1] = "kkkk"
self.assertEqual(arr[1], "kkk")
self.assertEqual(col[1], "kkk")
# dt.strings (with object representation)
arr = np.array(["a", "b", "cde"], dtype=object)
self.assertEqual(dt.typeof_np_dtype(arr.dtype), dt.String(True))
col = self.ts._FullColumn(arr, dtype=dt.String(True))
self.assertTrue(col.valid(1))
arr[1] = "kkkk"
self.assertEqual(arr[1], "kkkk")
self.assertEqual(col[1], "kkkk")
def test_strings_with_mask(self):
def is_not_str(s):
return not isinstance(s, str)
# dt.strings (with object representation)
arr = np.array(["a", None, "cde"], dtype=object)
self.assertEqual(dt.typeof_np_dtype(arr.dtype), dt.String(True))
mask = np.vectorize(is_not_str)(arr)
col = self.ts._FullColumn(arr, dtype=dt.String(True), mask=mask)
self.assertTrue(col.valid(0))
self.assertFalse(col.valid(1))
arr[1] = "kkkk"
self.assertEqual(arr[1], "kkkk")
self.assertEqual(col._data[1], "kkkk")
self.assertEqual(col[1], None)
def test_panda_series(self):
s = pd.Series([1, 2, 3])
self.assertEqual(list(s), list(from_pandas_series(s)))
s = pd.Series([1.0, np.nan, 3])
self.assertEqual([1.0, None, 3], list(from_pandas_series(s)))
s = pd.Series([1, 2, 3])
self.assertEqual(list(s), list(from_pandas_series(s, dt.Int16(False))))
s = pd.Series([1, 2, 3])
t = from_pandas_series(s)
self.assertEqual(t.dtype, dt.Int64(False))
self.assertEqual(list(s), list(from_pandas_series(s)))
s = pd.Series([True, False, True])
t = from_pandas_series(s)
self.assertEqual(t.dtype, dt.Boolean(False))
self.assertEqual(list(s), list(from_pandas_series(s)))
s = pd.Series(["a", "b", "c", "d", "e", "f", "g"])
t = from_pandas_series(s)
# TODO Check following assert
# self.assertEqual(t.dtype, dt.String(False))
self.assertEqual(list(s), list(t))
def test_panda_dataframes(self):
s = | pd.DataFrame({"a": [1, 2, 3]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 07:16:35 2018
@author: MiguelArturo
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from math import log, sqrt
import numpy as np
import pandas as pd
from bokeh.plotting import figure
#Import modules for interactive graphics
from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Select
from bokeh.io import curdoc
#Import modules for conversions to radians.
import math
#Import modules for time management and time zones
import time
def make_plot(source):
hover = HoverTool(
names=["anular_wedges"],
tooltips=[
("Activity", "@Activity_name"),
("(ir,or)", "(@inner_radius, @outer_radius)"),
("color", "@color"),
])
plot = figure(width=700, height=700,tools=[hover], title="",x_axis_type=None, y_axis_type=None, x_range=(-420, 420), y_range=(-420, 420),
min_border=0, outline_line_color="white", background_fill_color="#ffffff",)
plot.annular_wedge(x=0, y=0, inner_radius='inner_radius', outer_radius='outer_radius',start_angle='start_angle', end_angle='end_angle',
color='color', alpha=0.6, hover_color="lightgrey", source=source,name="anular_wedges")
#Fixed attributes
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = None
#plot clock
angles = 2*np.pi/24*pd.Series(list(range(0,24)))
plot.annular_wedge(0, 0, fr_inner_radius, tr_outer_radius, angles, angles, color="lightgrey")
# Plot clock labels (24 hours)
labels = np.power(10.0, np.arange(-3, 4))
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = ((tr_outer_radius + 10) - fr_inner_radius) / (minr - maxr)
b = fr_inner_radius - a * maxr
radii = a * np.sqrt(np.log(labels * 1E4)) + b
xr = radii[0]*np.cos(np.array(angles))
yr = radii[0]*np.sin(np.array(angles))
label_angle=np.array(angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
labels_24h_clock = list(range(6,-1,-1)) + list(range(23,6,-1))
plot.text(xr, yr, pd.Series(labels_24h_clock), angle=label_angle, text_font_size="9pt", text_align="center", text_baseline="middle",
text_color="lightgrey")
return plot
def get_dataset (src,timestamp):
duration_index = np.where(src['Start_Date']== timestamp)
LCday,LCtime = timestamp.split(" ",1)
start_time = LCtime
duration = src["Duration"][duration_index[0][0]]
activity_name= src["Name"][duration_index[0][0]]
#Convert HH:MM:SS format to radians
ts = time.strptime(start_time, "%H:%M:%S")
hour = (ts[3] + (ts[4]/60) + (ts[5]/3600))
hour_rad = math.radians(hour * 15.0)
#add "pi/2" to transform radians to a 24 hours clock form.
hour_in_radians_to_plot = -hour_rad + np.pi/2
#Convert the duration in seconds to radians
sec_rad = time.gmtime(duration)
hour_duration = (sec_rad[3] + (sec_rad[4]/60))
hour_rad_duration = math.radians(hour_duration * 15.0)
duration_in_radians_to_plot = (hour_in_radians_to_plot + hour_rad_duration)
start_angle= hour_in_radians_to_plot - hour_rad_duration
end_angle= duration_in_radians_to_plot - hour_rad_duration
df = pd.DataFrame({'start_angle':[start_angle],
'end_angle':[end_angle],
'color':["pink"],
'inner_radius':[fr_inner_radius],
'outer_radius':[fr_outer_radius],
'Activity_name':[activity_name],
})
return ColumnDataSource(data=df)
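# Worked example of the angle conversion above (illustrative only): a start time of
# "06:00:00" gives hour = 6, hour_rad = radians(6 * 15) = pi/2, and the plotted angle
# -hour_rad + pi/2 = 0, i.e. 06:00 lands on the positive x-axis of the 24-hour clock;
# the wedge then extends by the event duration converted the same way.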
def update_plot(attrname, old, new):
timestamp = select_timestamp.value
src = get_dataset(LC_data,timestamp)
source.data.update(src.data)
#Fixed plot's atributes
#First ring (fr) parameters
fr_inner_radius = 140
fr_outer_radius = 200
#Second ring (sr) parameters
sr_inner_radius = fr_outer_radius+2
sr_outer_radius = fr_outer_radius+52
#third ring (tr) parameters
tr_inner_radius = fr_outer_radius+52+2
tr_outer_radius = fr_outer_radius+52+2+42
#Read data and rename the columns. Column names were changed because the originals contain spaces and special characters
# that make string manipulation more complicated. For instance: ' NAME', 'START DATE(UTC)'.
LC_data = pd.read_csv('../data/Life Cycle/example/LC_export 3.csv')
LC_data.columns = ['Start_Date', 'End_Date','Start_Time','End_time','Duration','Name','Location']
#Convert 'Start_Date' to datetime64[ns] to use pands Time Series / Date functionality.
LC_data['Start_Date'] = pd.to_datetime(LC_data.Start_Date)
#Get all the timestamps per a selected day
unique_days_list = LC_data.Start_Date.dt.date
index_hours_same_day = np.where(LC_data.Start_Date.dt.date==unique_days_list.unique()[2])
index_hours_same_day[0][4]
events_at_day = LC_data.Start_Date[list(index_hours_same_day[0][:])]
columns_ud = ['Unique_Days']
New_data_days_unique = | pd.DataFrame(unique_days_list.index,columns=columns_ud) | pandas.DataFrame |
import pandas as pd
import pytest
from feature_engine.creation import CyclicalFeatures
@pytest.fixture
def df_cyclical():
df = {
"day": [6, 7, 5, 3, 1, 2, 4],
"months": [3, 7, 9, 12, 4, 6, 12],
}
df = pd.DataFrame(df)
return df
def test_general_transformation_without_dropping_variables(df_cyclical):
# test case 1: just one variable.
cyclical = CyclicalFeatures(variables=["day"])
X = cyclical.fit_transform(df_cyclical)
transf_df = df_cyclical.copy()
# expected output
transf_df["day_sin"] = [
-0.78183,
0.0,
-0.97493,
0.43388,
0.78183,
0.97493,
-0.43388,
]
transf_df["day_cos"] = [
0.623490,
1.0,
-0.222521,
-0.900969,
0.623490,
-0.222521,
-0.900969,
]
# fit attr
assert cyclical.max_values_ == {"day": 7}
# test transform output
pd.testing.assert_frame_equal(X, transf_df)
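# Note (added for clarity): the expected values above come from the standard cyclical
# encoding sin(2 * pi * x / max) and cos(2 * pi * x / max); e.g. for day = 6 with
# max_values_['day'] = 7, np.sin(2 * np.pi * 6 / 7) ~= -0.78183, matching the first
# entry of transf_df['day_sin'].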
def test_general_transformation_dropping_original_variables(df_cyclical):
# test case 1: just one variable, but dropping the variable after transformation
cyclical = CyclicalFeatures(variables=["day"], drop_original=True)
X = cyclical.fit_transform(df_cyclical)
transf_df = df_cyclical.copy()
# expected output
transf_df["day_sin"] = [
-0.78183,
0.0,
-0.97493,
0.43388,
0.78183,
0.97493,
-0.43388,
]
transf_df["day_cos"] = [
0.623490,
1.0,
-0.222521,
-0.900969,
0.623490,
-0.222521,
-0.900969,
]
transf_df = transf_df.drop(columns="day")
# test fit attr
assert cyclical.n_features_in_ == 2
assert cyclical.max_values_ == {"day": 7}
# test transform output
| pd.testing.assert_frame_equal(X, transf_df) | pandas.testing.assert_frame_equal |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-n_it','--n_iteration',required=True)
parser.add_argument('-protein','--protein',required=True)
parser.add_argument('-file_path','--file_path',required=True)
parser.add_argument('-mdd','--morgan_directory',required=True)
io_args = parser.parse_args()
n_iteration = int(io_args.n_iteration)
protein = io_args.protein
file_path = io_args.file_path
mdd=io_args.morgan_directory
import pandas as pd
import time
import numpy as np
import glob
import os
from keras.models import model_from_json
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve,roc_curve,fbeta_score, precision_score, recall_score
from shutil import copy2
#protein_name = 'CAMKK2/mini_pd'
#file_path = '../'
#iteration_done = 1
t_mol = pd.read_csv(mdd+'/Mol_ct_file.csv',header=None)[[0]].sum()[0]/1000000
hyperparameters = pd.read_csv(file_path+'/'+protein+'/iteration_'+str(n_iteration)+'/'+'hyperparameter_morgan_with_freq_v3.csv',header=None)
hyperparameters.columns = ['Model_no','Over_sampling','Batch_size','Learning_rate','N_layers','N_units','dropout',
'weight','cutoff','ROC_AUC','Pr_0_9','tot_left_0_9_mil','auc_te','pr_te','re_te','tot_left_0_9_mil_te','tot_positives']
hyperparameters.tot_left_0_9_mil = hyperparameters.tot_left_0_9_mil/1000000
hyperparameters.tot_left_0_9_mil_te = hyperparameters.tot_left_0_9_mil_te/1000000
hyperparameters['re_vl/re_pr'] = 0.9/hyperparameters.re_te
tmp = hyperparameters.groupby('cutoff')
cf_values = {}
for mini_df in tmp:
print(mini_df[0])
print(mini_df[1]['re_vl/re_pr'].mean())
print(mini_df[1]['re_vl/re_pr'].std())
cf_values[mini_df[0]] = mini_df[1]['re_vl/re_pr'].std()
#print(mini_df[1][mini_df[1].tot_left_0_9_mil_te==mini_df[1].tot_left_0_9_mil_te.min()])
print(cf_values)
model_to_use_with_cf = []
ind_pr = []
for cf in cf_values:
if cf_values[cf]<0.01:
tmp = hyperparameters[hyperparameters.cutoff==cf]
thr = 0.9
while True:
if len(tmp[tmp.re_te>=thr])>=3:
tmp = tmp[tmp.re_te>=thr]
break
else:
thr = thr - 0.01
#tmp = tmp[tmp.re_te>=0.895]
#if len(tmp)
tmp = tmp.sort_values('pr_te')[::-1]
try:
model_to_use_with_cf.append([cf,tmp.Model_no.iloc[0]])
ind_pr.append([cf,tmp.pr_te.iloc[0]])
except:
pass
else:
tmp = hyperparameters[hyperparameters.cutoff==cf]
thr = 0.9
while True:
if len(tmp[tmp.re_te>=thr])>=3:
tmp = tmp[tmp.re_te>=thr]
break
else:
thr = thr - 0.01
#tmp = tmp[tmp.re_te>=0.895]
tmp = tmp.sort_values('pr_te')[::-1]
try:
model_to_use_with_cf.append([cf,tmp.Model_no.iloc[:3].values])
ind_pr.append([cf,tmp.pr_te.iloc[:3].values])
except:
pass
#v_temp = []
#for i in range(len(model_to_use_with_cf)):
# cf = model_to_use_with_cf[i][0]
# tmp = hyperparameters[hyperparameters.cutoff==cf]
# t_pos = tmp.tot_positives.unique()
# if t_pos>150:
# v_temp.append(model_to_use_with_cf[i])
#model_to_use_with_cf = v_temp
print(model_to_use_with_cf)
print(ind_pr)
all_model_files = {}
for f in glob.glob(file_path+'/'+protein+'/iteration_'+str(n_iteration)+'/all_models/*'):
all_model_files[f] = 1
for f in glob.glob(file_path+'/'+protein+'/iteration_'+str(n_iteration)+'/all_models/*'):
try:
mn = int(f.split('/')[-1].split('_')[1])
except:
mn = int(f.split('/')[-1].split('_')[1].split('.')[0])
for i in range(len(model_to_use_with_cf)):
try:
if mn in model_to_use_with_cf[i][-1]:
all_model_files.pop(f)
except:
if mn==model_to_use_with_cf[i][-1]:
all_model_files.pop(f)
for f in all_model_files.keys():
os.remove(f)
def get_all_x_data(fname,y):
train_set = np.zeros([1000000,1024])
train_id = []
with open(fname,'r') as ref:
no=0
for line in ref:
tmp=line.rstrip().split(',')
train_id.append(tmp[0])
on_bit_vector = tmp[1:]
for elem in on_bit_vector:
train_set[no,int(elem)] = 1
no+=1
train_set = train_set[:no,:]
train_pd = pd.DataFrame(data=train_set)
train_pd['ZINC_ID'] = train_id
if len(y.columns)!=2:
y.reset_index(level=0,inplace=True)
else:
print('already 2 columns: ',fname)
score_col = y.columns.difference(['ZINC_ID'])[0]
train_data = pd.merge(y,train_pd,how='inner',on=['ZINC_ID'])
X_train = train_data[train_data.columns.difference(['ZINC_ID',score_col])].values
y_train = train_data[[score_col]].values
return X_train,y_train
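# Note (added for clarity): each line of the morgan file is expected to look like
# "ZINC000123,5,17,903,...", i.e. a molecule id followed by the indices of the
# 1024-bit Morgan fingerprint bits that are set; get_all_x_data densifies those
# indices into a 0/1 matrix and inner-joins it with the label frame y on ZINC_ID.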
try:
valid_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/valid_morgan_1024_updated.csv',header=None,usecols=[0])
except:
valid_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/valid_morgan_1024_updated.csv',header=None,usecols=[0],engine='python')
try:
if 'ZINC' in valid_pd.index[0]:
valid_pd = pd.DataFrame(data=valid_pd.index)
except:
pass
valid_pd.columns= ['ZINC_ID']
valid_label = pd.read_csv(file_path+'/'+protein+'/iteration_1/validation_labels.txt',sep=',',header=0)
validation_data = pd.merge(valid_label,valid_pd,how='inner',on=['ZINC_ID'])
validation_data.set_index('ZINC_ID',inplace=True)
y_valid = validation_data
try:
test_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/test_morgan_1024_updated.csv',header=None,usecols=[0])
except:
test_pd = pd.read_csv(file_path+'/'+protein+'/iteration_1/morgan/test_morgan_1024_updated.csv',header=None,usecols=[0],engine='python')
try:
if 'ZINC' in test_pd.index[0]:
test_pd = | pd.DataFrame(data=test_pd.index) | pandas.DataFrame |
import os
import glob
import requests
import pandas as pd
from credential import API_KEY
"""
Notice:
This script assumes that you have unnormalised csv files under ../csv_data/
after you run ./data_creation.py.
"""
##########
# Rename table names
##########
target_dir = '../csv_data/'
change_dict = {
'production_companies': 'company_info',
'countries': 'country_info',
'genre': 'genre_info',
'spoken_languages': 'language_info',
'people': 'person'
}
for original_name, new_name in change_dict.items():
source = target_dir + original_name
dest = target_dir + new_name
try:
os.rename(source, dest)
except Exception as e:
print("File Exception: ", e)
##########
# Make --> Genres, Companies, Countries, Spoken_languages, People
##########
# new df init
df_genres = pd.DataFrame(columns=['mid', 'genre_id'])
df_companies = pd.DataFrame(columns=['mid', 'company_id'])
df_countries = pd.DataFrame(columns=['mid', 'country_id'])
df_spoken_languages = pd.DataFrame(columns=['mid', 'language_id'])
df_people = pd.DataFrame(columns=['mid', 'person_id'])
# read original movies csv
movies_orig = pd.read_csv(f'{target_dir}movies.csv')
# Do not apply this for spoken languags in movie
def oneNFise(df, target_col, target_df):
df_interest = df[['mid', target_col]]
for i, row in df_interest.iterrows():
curr_mid = row.mid
curr_target_col_val = str(row[target_col])
isNumeric = curr_target_col_val.isnumeric()
# append empty val
if curr_target_col_val=='nan':
target_df = target_df.append(pd.Series({'mid':curr_mid, target_col: ''}), ignore_index=True)
# Expand
elif '|' in curr_target_col_val:
id_list = (df[target_col].iloc[i]).split('|')
for id in id_list:
val = int(id) if isNumeric else id
append_series = pd.Series({
'mid': curr_mid,
target_col: val
})
target_df = target_df.append(append_series, ignore_index=True)
# Only one id so append just that
else:
val = int(curr_target_col_val) if isNumeric else curr_target_col_val
target_df = target_df.append(pd.Series({'mid':curr_mid, target_col: val}), ignore_index=True)
return target_df
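# Worked example of oneNFise (illustrative only): a movies row with mid = 1 and
# genre_id = "12|16|35" is expanded into three rows of the target frame,
# (1, 12), (1, 16) and (1, 35); a NaN value becomes a single row with an empty
# string, and a plain value is appended as-is.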
def insert_index(df):
df.insert(loc=0, column='index', value=[i for i in range(len(df))])
# Genres
df_genres = oneNFise(movies_orig, 'genre_id', df_genres)
insert_index(df_genres)
df_genres.to_csv(f'{target_dir}genres.csv', index=False)
# Countries
df_countries = oneNFise(movies_orig, 'country_id', df_countries)
insert_index(df_countries)
df_countries.to_csv(f'{target_dir}countries.csv', index=False)
# Companies
df_companies = oneNFise(movies_orig, 'company_id', df_companies)
insert_index(df_companies)
df_companies.to_csv(f'{target_dir}companies.csv', index=False)
# People
df_people = oneNFise(movies_orig, 'person_id', df_people)
insert_index(df_people)
df_people.to_csv(f'{target_dir}people.csv', index=False)
# Spoken_languages
# code below does the same thing as oneNFise but just only for Spoken_languages
df_interest = movies_orig[['mid', 'spoken_languages']]
for i, row in df_interest.iterrows():
curr_mid = row.mid
curr_target_col_val = str(row['spoken_languages'])
if curr_target_col_val=='nan':
df_spoken_languages = df_spoken_languages.append(pd.Series({'mid':curr_mid, 'language_id': ''}), ignore_index=True)
elif '|' in curr_target_col_val:
id_list = (df_interest['spoken_languages'].iloc[i]).split('|')
for id in id_list:
append_series = pd.Series({'mid': curr_mid, 'language_id': id})
df_spoken_languages = df_spoken_languages.append(append_series, ignore_index=True)
else:
df_spoken_languages = df_spoken_languages.append(pd.Series({'mid':curr_mid, 'language_id': curr_target_col_val}), ignore_index=True)
insert_index(df_spoken_languages)
df_spoken_languages.to_csv(f'{target_dir}spoken_languages.csv', index=False)
# Drop columns in movie original
df_movies = movies_orig.drop(columns=['genre_id', 'company_id', 'country_id', 'spoken_languages', 'person_id'])
df_movies.to_csv(f'{target_dir}movies.csv', index=False)
##########
# Add more data in normalised form
##########
# Table: Translation
def request_translations(mid):
endpoint = f"https://api.themoviedb.org/3/movie/{mid}/translations?api_key={API_KEY}"
# sending get request and saving the response as response object
r = requests.get(url=endpoint)
# extracting data in json format
data = r.json()
return data
movies = | pd.read_csv(f'{target_dir}movies.csv') | pandas.read_csv |
# EcoFOCI
"""Contains a collection of ADCP equipment parsing.
These include:
* LR-ADCP
* Teledyne ADCP
* RCM ADCP
"""
import numpy as np
import pandas as pd
class adcp(object):
"""
"""
def __init__(self,serialno=None,depdir=None):
if depdir:
self.depdir = depdir + serialno
else:
self.depdir = None
def load_pg_file(self, pgfile_path=None, datetime_index=True):
"""load Pecent Good (PG) file
The four Percent Good values represent (in order):
1) The percentage of good three beam solutions (one beam rejected);
2) The percentage of good transformations (error velocity threshold not exceeded);
3) The percentage of measurements where more than one beam was bad;
4) The percentage of measurements with four beam solutions. <--- use this to qc data stream
Args:
pgfile_path (str, optional): full path to the pg file. Defaults to None.
"""
if self.depdir:
pgfile_path = self.depdir + '.PG'
self.pg_df = pd.read_csv(pgfile_path,delimiter='\s+',header=None,names=['date','time','bin','pg3beam-good','pgtransf-good','pg1beam-bad','pg4beam-good'])
self.pg_df["date_time"] = pd.to_datetime(self.pg_df.date+' '+self.pg_df.time,format="%y/%m/%d %H:%M:%S")
if datetime_index:
self.pg_df = self.pg_df.set_index(pd.DatetimeIndex(self.pg_df['date_time'])).drop(['date_time','date','time'],axis=1)
return self.pg_df
def load_ein_file(self, einfile_path=None, datetime_index=True):
if self.depdir:
einfile_path = self.depdir + '.EIN'
self.ein_df = pd.read_csv(einfile_path,delimiter='\s+',header=None,names=['date','time','bin','agc1','agc2','agc3','agc4'])
self.ein_df["date_time"] = pd.to_datetime(self.ein_df.date+' '+self.ein_df.time,format="%y/%m/%d %H:%M:%S")
if datetime_index:
self.ein_df = self.ein_df.set_index(pd.DatetimeIndex(self.ein_df['date_time'])).drop(['date_time','date','time'],axis=1)
return self.ein_df
def load_vel_file(self, velfile_path=None, datetime_index=True):
if self.depdir:
velfile_path = self.depdir + '.VEL'
self.vel_df = pd.read_csv(velfile_path,delimiter='\s+',header=None,
names=['date','time','bin','u_curr_comp','v_curr_comp','w_curr_comp','w_curr_comp_err'])
self.vel_df["date_time"] = | pd.to_datetime(self.vel_df.date+' '+self.vel_df.time,format="%y/%m/%d %H:%M:%S") | pandas.to_datetime |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_file_path, categories_file_path):
"""
Load two files into dataframes and merge them.
Args:
messages_file_path(str): The file path of Messages file
categories_file_path(str): The file path of Categories file
Returns:
df: The merged dataframe
"""
messages = pd.read_csv(messages_file_path)
categories = | pd.read_csv(categories_file_path) | pandas.read_csv |
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sys
from os.path import join as pjoin
import scanpy as sc
import anndata
from sklearn.metrics import r2_score, mean_squared_error
from gpsa import VariationalGPSA, rbf_kernel
from gpsa.plotting import callback_twod
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF, Matern
from scipy.sparse import load_npz
## For PASTE
import scanpy as sc
import anndata
import matplotlib.patches as mpatches
from sklearn.neighbors import NearestNeighbors, KNeighborsRegressor
from sklearn.metrics import r2_score
device = "cuda" if torch.cuda.is_available() else "cpu"
def scale_spatial_coords(X, max_val=10.0):
X = X - X.min(0)
X = X / X.max(0)
return X * max_val
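# Note (added for clarity): scale_spatial_coords min-max scales each coordinate axis
# independently onto [0, max_val]; e.g. a column ranging from 200 to 700 is shifted to
# [0, 500] and rescaled to [0, 10] before the slices are centred and rotated below.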
DATA_DIR = "../../../data/slideseq/mouse_hippocampus"
N_GENES = 10
N_SAMPLES = 2000
n_spatial_dims = 2
n_views = 2
m_G = 200
m_X_per_view = 200
N_LATENT_GPS = {"expression": None}
N_EPOCHS = 2000
PRINT_EVERY = 100
FRAC_TEST = 0.2
N_REPEATS = 10
GENE_IDX_TO_TEST = np.arange(N_GENES)
def process_data(adata, n_top_genes=2000):
adata.var_names_make_unique()
adata.var["mt"] = adata.var_names.str.startswith("MT-")
sc.pp.calculate_qc_metrics(adata, qc_vars=["mt"], inplace=True)
sc.pp.filter_cells(adata, min_counts=500) # 1800
# sc.pp.filter_cells(adata, max_counts=35000)
# adata = adata[adata.obs["pct_counts_mt"] < 20]
# sc.pp.filter_genes(adata, min_cells=10)
sc.pp.normalize_total(adata, inplace=True)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(
adata, flavor="seurat", n_top_genes=n_top_genes, subset=True
)
return adata
spatial_locs_slice1 = pd.read_csv(
pjoin(DATA_DIR, "Puck_200115_08_spatial_locs.csv"), index_col=0
)
expression_slice1 = load_npz(pjoin(DATA_DIR, "Puck_200115_08_expression.npz"))
gene_names_slice1 = pd.read_csv(
pjoin(DATA_DIR, "Puck_200115_08_gene_names.csv"), index_col=0
)
barcode_names_slice1 = pd.read_csv(
pjoin(DATA_DIR, "Puck_200115_08_barcode_names.csv"), index_col=0
)
data_slice1 = anndata.AnnData(
X=expression_slice1, obs=barcode_names_slice1, var=gene_names_slice1
)
data_slice1.obsm["spatial"] = spatial_locs_slice1.values
data_slice1 = process_data(data_slice1, n_top_genes=6000)
spatial_locs_slice2 = pd.read_csv(
pjoin(DATA_DIR, "Puck_191204_01_spatial_locs.csv"), index_col=0
)
expression_slice2 = load_npz(pjoin(DATA_DIR, "Puck_191204_01_expression.npz"))
gene_names_slice2 = pd.read_csv(
pjoin(DATA_DIR, "Puck_191204_01_gene_names.csv"), index_col=0
)
barcode_names_slice2 = pd.read_csv(
pjoin(DATA_DIR, "Puck_191204_01_barcode_names.csv"), index_col=0
)
data_slice2 = anndata.AnnData(
X=expression_slice2, obs=barcode_names_slice2, var=gene_names_slice2
)
data_slice2.obsm["spatial"] = spatial_locs_slice2.values
data_slice2 = process_data(data_slice2, n_top_genes=6000)
if N_SAMPLES is not None:
rand_idx = np.random.choice(
np.arange(data_slice1.shape[0]), size=N_SAMPLES, replace=False
)
data_slice1 = data_slice1[rand_idx]
rand_idx = np.random.choice(
np.arange(data_slice2.shape[0]), size=N_SAMPLES, replace=False
)
data_slice2 = data_slice2[rand_idx]
# rand_idx = np.random.choice(
# np.arange(data.shape[0]), size=N_SAMPLES * 2, replace=False
# )
# data = data[rand_idx]
## Remove outlier points outside of puck
MAX_NEIGHBOR_DIST = 700
knn = NearestNeighbors(n_neighbors=10).fit(data_slice1.obsm["spatial"])
neighbor_dists, _ = knn.kneighbors(data_slice1.obsm["spatial"])
inlier_idx = np.where(neighbor_dists[:, -1] < MAX_NEIGHBOR_DIST)[0]
data_slice1 = data_slice1[inlier_idx]
knn = NearestNeighbors(n_neighbors=10).fit(data_slice2.obsm["spatial"])
neighbor_dists, _ = knn.kneighbors(data_slice2.obsm["spatial"])
inlier_idx = np.where(neighbor_dists[:, -1] < MAX_NEIGHBOR_DIST)[0]
data_slice2 = data_slice2[inlier_idx]
## Save original data
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.scatter(
data_slice1.obsm["spatial"][:, 0],
data_slice1.obsm["spatial"][:, 1],
# c=np.log(np.array(data_slice1.X[:, 0].todense()) + 1)
s=3,
)
plt.title("Slice 1", fontsize=30)
plt.gca().invert_yaxis()
plt.axis("off")
plt.subplot(122)
plt.scatter(
data_slice2.obsm["spatial"][:, 0],
data_slice2.obsm["spatial"][:, 1],
# c=np.log(np.array(data_slice2.X[:, 0].todense()) + 1)
s=3,
)
plt.title("Slice 2", fontsize=30)
plt.gca().invert_yaxis()
plt.axis("off")
plt.savefig("./out/slideseq_original_slices.png")
# plt.show()
plt.close()
# import ipdb
# ipdb.set_trace()
angle = 1.45
slice1_coords = data_slice1.obsm["spatial"].copy()
slice2_coords = data_slice2.obsm["spatial"].copy()
slice1_coords = scale_spatial_coords(slice1_coords, max_val=10) - 5
slice2_coords = scale_spatial_coords(slice2_coords, max_val=10) - 5
R = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
slice2_coords = slice2_coords @ R
slice2_coords += np.array([1.0, 1.0])
data_slice1.obsm["spatial"] = slice1_coords
data_slice2.obsm["spatial"] = slice2_coords
print(data_slice1.shape, data_slice2.shape)
data = data_slice1.concatenate(data_slice2)
## Remove genes with no variance
shared_gene_names = data.var.gene_ids.index.values
data_slice1 = data_slice1[:, shared_gene_names]
data_slice2 = data_slice2[:, shared_gene_names]
nonzerovar_idx = np.intersect1d(
np.where(np.array(data_slice1.X.todense()).var(0) > 0)[0],
np.where(np.array(data_slice2.X.todense()).var(0) > 0)[0],
)
data = data[:, nonzerovar_idx]
data_slice1 = data_slice1[:, nonzerovar_idx]
data_slice2 = data_slice2[:, nonzerovar_idx]
# import ipdb
# ipdb.set_trace()
data_knn = data_slice1 #[:, shared_gene_names]
X_knn = data_knn.obsm["spatial"]
Y_knn = np.array(data_knn.X.todense())
Y_knn = (Y_knn - Y_knn.mean(0)) / Y_knn.std(0)
# nbrs = NearestNeighbors(n_neighbors=2).fit(X_knn)
# distances, indices = nbrs.kneighbors(X_knn)
knn = KNeighborsRegressor(n_neighbors=10, weights="uniform").fit(X_knn, Y_knn)
preds = knn.predict(X_knn)
# preds = Y_knn[indices[:, 1]]
r2_vals = r2_score(Y_knn, preds, multioutput="raw_values")
gene_idx_to_keep = np.where(r2_vals > 0.3)[0]
N_GENES = min(N_GENES, len(gene_idx_to_keep))
gene_names_to_keep = data_knn.var.gene_ids.index.values[gene_idx_to_keep]
gene_names_to_keep = gene_names_to_keep[np.argsort(-r2_vals[gene_idx_to_keep])]
r2_vals_sorted = -1 * np.sort(-r2_vals[gene_idx_to_keep])
if N_GENES < len(gene_names_to_keep):
gene_names_to_keep = gene_names_to_keep[:N_GENES]
data = data[:, gene_names_to_keep]
# if N_SAMPLES is not None:
# rand_idx = np.random.choice(
# np.arange(data_slice1.shape[0]), size=N_SAMPLES, replace=False
# )
# data_slice1 = data_slice1[rand_idx]
# rand_idx = np.random.choice(
# np.arange(data_slice2.shape[0]), size=N_SAMPLES, replace=False
# )
# data_slice2 = data_slice2[rand_idx]
# # rand_idx = np.random.choice(
# # np.arange(data.shape[0]), size=N_SAMPLES * 2, replace=False
# # )
# # data = data[rand_idx]
# data = data_slice1.concatenate(data_slice2)
all_slices = anndata.concat([data_slice1, data_slice2])
n_samples_list = [data[data.obs.batch == str(ii)].shape[0] for ii in range(n_views)]
X1 = np.array(data[data.obs.batch == "0"].obsm["spatial"])
X2 = np.array(data[data.obs.batch == "1"].obsm["spatial"])
Y1 = np.array(data[data.obs.batch == "0"].X.todense())
Y2 = np.array(data[data.obs.batch == "1"].X.todense())
Y1 = (Y1 - Y1.mean(0)) / Y1.std(0)
Y2 = (Y2 - Y2.mean(0)) / Y2.std(0)
X = np.concatenate([X1, X2])
Y = np.concatenate([Y1, Y2])
view_idx = [
np.arange(X1.shape[0]),
np.arange(X1.shape[0], X1.shape[0] + X2.shape[0]),
]
errors_union, errors_separate, errors_gpsa = [], [], []
for repeat_idx in range(N_REPEATS):
## Drop part of the second view (this is the part we'll try to predict)
second_view_idx = view_idx[1]
n_drop = int(1.0 * n_samples_list[1] * FRAC_TEST)
test_idx = np.random.choice(second_view_idx, size=n_drop, replace=False)
## Only test on interior of tissue
interior_idx = np.where(
(X[:, 0] > 2.5) & (X[:, 0] < 7.5) & (X[:, 1] > 2.5) & (X[:, 1] < 7.5)
)[0]
test_idx = np.intersect1d(interior_idx, test_idx)
n_drop = test_idx.shape[0]
keep_idx = np.setdiff1d(second_view_idx, test_idx)
train_idx = np.concatenate([np.arange(n_samples_list[0]), keep_idx])
X_train = X[train_idx]
Y_train = Y[train_idx]
n_samples_list_train = n_samples_list.copy()
n_samples_list_train[1] -= n_drop
n_samples_list_test = [[0], [n_drop]]
X_test = X[test_idx]
Y_test = Y[test_idx]
gene_idx_to_keep = np.logical_and(np.var(Y_train, axis=0) > 1e-1, np.var(Y_test, axis=0) > 1e-1)
GENE_IDX_TO_TEST = np.intersect1d(GENE_IDX_TO_TEST, gene_idx_to_keep)
Y_train = Y_train[:, gene_idx_to_keep]
Y_test = Y_test[:, gene_idx_to_keep]
# import ipdb; ipdb.set_trace()
x_train = torch.from_numpy(X_train).float().clone()
y_train = torch.from_numpy(Y_train).float().clone()
x_test = torch.from_numpy(X_test).float().clone()
y_test = torch.from_numpy(Y_test).float().clone()
data_dict_train = {
"expression": {
"spatial_coords": x_train,
"outputs": y_train,
"n_samples_list": n_samples_list_train,
}
}
data_dict_test = {
"expression": {
"spatial_coords": x_test,
"outputs": y_test,
"n_samples_list": n_samples_list_test,
}
}
model = VariationalGPSA(
data_dict_train,
n_spatial_dims=n_spatial_dims,
m_X_per_view=m_X_per_view,
m_G=m_G,
data_init=True,
minmax_init=False,
grid_init=False,
n_latent_gps=N_LATENT_GPS,
mean_function="identity_fixed",
kernel_func_warp=rbf_kernel,
kernel_func_data=rbf_kernel,
# fixed_warp_kernel_variances=np.ones(n_views) * 1.,
# fixed_warp_kernel_lengthscales=np.ones(n_views) * 10,
fixed_view_idx=0,
).to(device)
view_idx_train, Ns_train, _, _ = model.create_view_idx_dict(data_dict_train)
view_idx_test, Ns_test, _, _ = model.create_view_idx_dict(data_dict_test)
## Make predictions for naive alignment
gpr_union = GaussianProcessRegressor(kernel=RBF() + WhiteKernel())
gpr_union.fit(X=X_train, y=Y_train)
preds = gpr_union.predict(X_test)
knn = KNeighborsRegressor(n_neighbors=10)
knn.fit(X=X_train, y=Y_train)
preds = knn.predict(X_test)
# error_union = np.mean(np.sum((preds - Y_test) ** 2, axis=1))
error_union = r2_score(Y_test[:, GENE_IDX_TO_TEST], preds[:, GENE_IDX_TO_TEST]) #, multioutput="raw_values")
errors_union.append(error_union)
print("MSE, union: {}".format(round(error_union, 5)), flush=True)
#
# print("R2, union: {}".format(round(r2_union, 5)))
# import ipdb; ipdb.set_trace()
## Make predictons for each view separately
preds, truth = [], []
for vv in range(n_views):
curr_trainX = X_train[view_idx_train["expression"][vv]]
curr_trainY = Y_train[view_idx_train["expression"][vv]]
curr_testX = X_test[view_idx_test["expression"][vv]]
curr_testY = Y_test[view_idx_test["expression"][vv]]
if len(curr_testX) == 0:
continue
# gpr_separate = GaussianProcessRegressor(kernel=RBF() + WhiteKernel())
# gpr_separate.fit(X=curr_trainX, y=curr_trainY)
# curr_preds = gpr_separate.predict(curr_testX)
knn = KNeighborsRegressor(n_neighbors=10)
knn.fit(X=curr_trainX, y=curr_trainY)
curr_preds = knn.predict(curr_testX)
preds.append(curr_preds)
truth.append(curr_testY)
preds = np.concatenate(preds, axis=0)
truth = np.concatenate(truth, axis=0)
# error_separate = np.mean(np.sum((preds - truth) ** 2, axis=1))
error_separate = r2_score(truth[:, GENE_IDX_TO_TEST], preds[:, GENE_IDX_TO_TEST])
print("MSE, separate: {}".format(round(error_separate, 5)), flush=True)
# print("R2, sep: {}".format(round(r2_sep, 5)))
errors_separate.append(error_separate)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
def train(model, loss_fn, optimizer):
model.train()
# Forward pass
G_means, G_samples, F_latent_samples, F_samples = model.forward(
X_spatial={"expression": x_train}, view_idx=view_idx_train, Ns=Ns_train, S=3
)
# Compute loss
loss = loss_fn(data_dict_train, F_samples)
# Compute gradients and take optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), G_means
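# One optimizer step: draw S Monte Carlo samples through the model, score them with
# model.loss_fn (reported below as a log-likelihood, i.e. -loss), backpropagate, and
# return the scalar loss together with the current aligned-coordinate means G_means.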
# Set up figure.
fig = plt.figure(figsize=(18, 7), facecolor="white", constrained_layout=True)
data_expression_ax = fig.add_subplot(131, frameon=False)
latent_expression_ax = fig.add_subplot(132, frameon=False)
prediction_ax = fig.add_subplot(133, frameon=False)
plt.show(block=False)
for t in range(N_EPOCHS):
loss, G_means = train(model, model.loss_fn, optimizer)
if t % PRINT_EVERY == 0 or t == N_EPOCHS - 1:
print("Iter: {0:<10} LL {1:1.3e}".format(t, -loss))
G_means_test, _, _, F_samples_test, = model.forward(
X_spatial={"expression": x_test},
view_idx=view_idx_test,
Ns=Ns_test,
prediction_mode=True,
S=10,
)
curr_preds = torch.mean(F_samples_test["expression"], dim=0)
# callback_twod(
# model,
# X_train,
# Y_train,
# data_expression_ax=data_expression_ax,
# latent_expression_ax=latent_expression_ax,
# # prediction_ax=ax_dict["preds"],
# X_aligned=G_means,
# # X_test=X_test,
# # Y_test_true=Y_test,
# # Y_pred=curr_preds,
# # X_test_aligned=G_means_test,
# )
# plt.draw()
# plt.pause(1 / 60.0)
error_gpsa = np.mean(
np.sum((Y_test - curr_preds.detach().numpy()) ** 2, axis=1)
)
# print("MSE, GPSA: {}".format(round(error_gpsa, 5)), flush=True)
# r2_gpsa = r2_score(Y_test, curr_preds.detach().numpy())
# print("R2, GPSA: {}".format(round(r2_gpsa, 5)))
curr_aligned_coords = G_means["expression"].detach().numpy()
curr_aligned_coords_test = G_means_test["expression"].detach().numpy()
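# G_means / G_means_test hold the model's estimated (mean) spot locations in the
# shared aligned coordinate system; the regression below is fit on the aligned
# training coordinates and evaluated at the aligned held-out coordinates.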
try:
# gpr_gpsa = GaussianProcessRegressor(kernel=RBF() + WhiteKernel())
# gpr_gpsa.fit(X=curr_aligned_coords, y=Y_train)
# preds = gpr_gpsa.predict(curr_aligned_coords_test)
knn = KNeighborsRegressor(n_neighbors=10)
knn.fit(X=curr_aligned_coords, y=Y_train)
preds = knn.predict(curr_aligned_coords_test)
# error_gpsa = np.mean(np.sum((preds - Y_test) ** 2, axis=1))
error_gpsa = r2_score(Y_test[:, GENE_IDX_TO_TEST], preds[:, GENE_IDX_TO_TEST])
print("MSE, GPSA GPR: {}".format(round(error_gpsa, 5)), flush=True)
except:
continue
errors_gpsa.append(error_gpsa)
plt.close()
results_df = pd.DataFrame(
{
"Union": errors_union[: repeat_idx + 1],
"Separate": errors_separate[: repeat_idx + 1],
"GPSA": errors_gpsa[: repeat_idx + 1],
}
)
results_df_melted = pd.melt(results_df)
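# Illustrative sketch, assuming one wants to persist and eyeball the per-repeat R^2
# scores gathered above; the output file names and the matplotlib boxplot are
# illustrative assumptions, not something the original analysis produces.
results_df.to_csv("prediction_r2_scores.csv", index=False)
fig_scores, ax_scores = plt.subplots(figsize=(5, 4))
ax_scores.boxplot(
    [results_df[c] for c in results_df.columns], labels=list(results_df.columns)
)
ax_scores.set_ylabel("R^2 on held-out spots")
fig_scores.savefig("prediction_r2_boxplot.png", dpi=150)
plt.close(fig_scores)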
import numpy as np
import pandas as pd
from pandas.tseries import converter
from pathlib import Path
from tqdm import tqdm
from datetime import datetime
from calendar import monthrange
from calendar import month_name
import matplotlib.pyplot as plt
import seaborn as sns
import swifter
import calendar
import pytz
class helioclim3:
def __init__(self, hc3csvfile, pkl_filename):
self.csvfile = hc3csvfile # csv file
cwd = Path.cwd()
pkl_file = cwd / pkl_filename
def savepandasfile(output=pkl_filename):
# Load the raw CSV once and cache it as a pickle for faster reloads
df = self.loading()
df.to_pickle(output)
def loadpandasfile(file=pkl_filename):
try:
return pd.read_pickle(file)
except:
print('LOADING FILE ERROR\n')
if pkl_file.exists():
print('LOADING .PKL FILE\n')
self.df = loadpandasfile()
else:
savepandasfile(pkl_filename)
self.df = loadpandasfile()
# def dfloaded(self):
# return self.df
def _fixhours(self, str_datetime):
# fix hour format 24:00:00
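# e.g. '01/02/2019 24:00:00' is rewritten to '01/02/2019 00:00'; note the hour is
# folded back to midnight of the same day rather than rolled over to the next day.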
if '24:' in str_datetime:
str_datetime = str_datetime.replace(
':00', '', 1).replace('24:', '00:')
return pd.to_datetime(str_datetime, format='%d/%m/%Y %H:%M')
else:
return pd.to_datetime(str_datetime, format='%d/%m/%Y %H:%M')
def loading(self, datafix=True):
df = pd.read_csv(self.csvfile, skiprows=31, sep=";",
parse_dates=[['# Date', 'Time']])
# Rename columns
df.rename(columns={"# Date_Time": "Date"}, inplace=True)
# Temperature: Kelvin to °C (the 273.15 offset converts from Kelvin, not °F)
df["Temperature"] = df["Temperature"] - 273.15
# Fix date
# tqdm.pandas()
df.loc[:, "Date"] = df.Date.swifter.apply(self._fixhours)
# Assign "Date" column to index
df.set_index("Date", inplace=True)
# Convert Date to correct timezone
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# https://stackoverflow.com/questions/22800079/converting-time-zone-pandas-dataframe
buenos_aires = pytz.timezone('America/Buenos_Aires')
df.index = df.index.tz_localize(pytz.utc).tz_convert(buenos_aires)
# Fix values -999
if datafix:
for i in df.columns:
df.loc[df[str(i)] <= -999, str(i)] = 0
else:
pass
return df
def plot(self, date1: str, date2: str, data='Global Horiz'):
df = self.loading()
converter.register()
d1 = datetime.strptime(date1, '%Y-%m-%d')
d2 = datetime.strptime(date2, '%Y-%m-%d')
df2plot = df.loc[d1:d2]
sns.set(style="darkgrid")
f, ax = plt.subplots(figsize=(10, 5))
sns.lineplot(x=df2plot.index, y=df2plot[data])
# Removing top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# Finalize the plot
sns.despine(bottom=True)
plt.setp(f.axes, xticks=[],
xlabel='Interval\nfrom: {0}\nto: {1}'.format(date1, date2),
ylabel='Solar Irradiation (W/m²)')
plt.tight_layout(h_pad=2)
plt.savefig("output.png")
print('Image saved.')
def analysis(self):
"""Characteristics of the data and NULL values
"""
df = self.loading(datafix=False)
noIrrad = df[df['Global Horiz'] == -999]
noIrradDays = noIrrad.index.date
noDataIrrad = len(noIrradDays)
totalIrrad = len(df['Global Horiz'])
percDataIrrad = (noDataIrrad/totalIrrad) * 100
yearsIrrad = sorted(set(df.index.year.values))
print('\nMeasurement data range: {0:d} to {1:d}'.format(
min(yearsIrrad), max(yearsIrrad)))
print('Number of rows without irradiation data: {0}'.format(
noDataIrrad))
print('Total number of rows: {0}'.format(totalIrrad))
print('Percentage of rows without irradiation data: {0:2.4f} %'.format(
percDataIrrad))
print('\nDays of the year with no irradiation records:')
for i in sorted(set(noIrradDays)):
print(i.strftime('%d/%m/%Y'))
code = [0, 1, 2, 5, 6]
numberbyCode = {i: len(df[df["Code"] == i]) for i in code}
idbyCode = {0: 'no data', 1: 'sun below horizon',
2: 'satellite assessment', 5: 'interpolation in time', 6: 'forecast'}
for i in numberbyCode.keys():
print("{0}: {1} - {2:2.1f}%".format(
idbyCode[i], numberbyCode[i], (numberbyCode[i] / totalIrrad)*100))
# DataFrame.info() prints its summary and returns None, so there is nothing to stringify
df.info()
def averSolarIrrad(self):
"""Calculates the average values for the irradiation kW/m²
"""
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#anchored-offsets
df = self.df
df.drop(columns=['Top of Atmosphere', 'Code', 'Relative Humidity',
'Wind direction', 'Rainfall', 'Snowfall',
'Snow depth', 'month', 'year'], inplace=True)
print(df.head().to_string())
# Compute the mean irradiation for each month
Wh_m2_mes = df.groupby(df.index.month).mean()['Global Horiz']
kWh_m2_dia = df.groupby(df.index.month).mean()[
'Global Horiz'] * 24/1000
Months = Wh_m2_mes.index.to_list()
result = {'kWh/m²_Diário': kWh_m2_dia,
'Wh/m²_Mensal': Wh_m2_mes, 'Month': Months}
dfIrrad = pd.DataFrame(result)
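# Assumption: return the monthly summary so callers can actually use it (the
# original method built dfIrrad without returning it).
return dfIrrad
# Example usage (illustrative sketch; the file names below are hypothetical):
# hc3 = helioclim3("hc3_site_data.csv", "hc3_site_data.pkl")
# hc3.analysis()
# hc3.plot("2015-01-01", "2015-01-31", data="Global Horiz")
# monthly_summary = hc3.averSolarIrrad()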
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
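# Concrete subclasses supply engine-specific wrappers (C parser vs. pure-Python
# parser) for read_csv/read_table, which is why the base class only raises
# NotImplementedError here.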
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
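# The helper above splits the file into num_tasks (start_row, nrows) slices, reads
# them concurrently through a thread pool, re-attaches the header from the first
# slice to the header-less chunks, and concatenates everything back into one frame.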
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
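# func() concatenates the selected date columns into single strings
# (e.g. "19990127 19:00:00") before handing them to lib.try_parse_dates.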
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
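# f(i, v) builds one CSV row whose i-th field is the NA string v and whose other
# nv - 1 fields are empty, so every cell of the parsed frame should come back NaN.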
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
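# --- Illustrative sketch (added by the editor, not part of the original test
# suite). The lambda-based date_parser above is the old-style API; a standalone
# and version-stable way to read day-first European dates is the dayfirst flag.
# Sample data is invented.
def _demo_dayfirst_dates():
    from io import StringIO
    import pandas as pd

    text = "time,Q\n31/01/2010,1\n01/02/2010,2\n"
    df = pd.read_csv(StringIO(text), parse_dates=['time'],
                     dayfirst=True, index_col='time')
    assert df.index[0] == pd.Timestamp(2010, 1, 31)
    return df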
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
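# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone version of the per-column na_values dictionary: a value is
# treated as missing only in the columns that list it. Data is invented.
def _demo_na_value_dict():
    from io import StringIO
    import pandas as pd

    data = "A,B,C\nfoo,bar,NA\nbar,foo,foo\n"
    df = pd.read_csv(StringIO(data), na_values={'A': ['foo'], 'B': ['bar']})
    # 'foo' becomes NaN only in column A, 'bar' only in column B
    assert pd.isna(df.loc[0, 'A']) and pd.isna(df.loc[0, 'B'])
    assert df.loc[1, 'C'] == 'foo'
    return df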
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
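# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Minimal standalone UTF-16 read: pandas decodes BOM-prefixed UTF-16
# bytes when the encoding is passed explicitly. Data is invented.
def _demo_utf16_roundtrip():
    from io import BytesIO
    import pandas as pd

    raw = "A,B\n1,2\n3,4\n".encode('utf-16')
    df = pd.read_csv(BytesIO(raw), encoding='utf-16')
    assert list(df.columns) == ['A', 'B'] and len(df) == 2
    return df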
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
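# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Minimal escapechar example in the spirit of the test above:
# backslash-escaped quotes inside a quoted field survive parsing. Data invented.
def _demo_escapechar():
    from io import StringIO
    import pandas as pd

    data = 'term,url\n"SLAGBORD, \\"Bergslagen\\", serie","http://example.com"\n'
    df = pd.read_csv(StringIO(data), escapechar='\\', quotechar='"')
    assert df['term'][0] == 'SLAGBORD, "Bergslagen", serie'
    return df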
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
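# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone restatement of the usecols/index_col interaction: with
# usecols active, an integer index_col refers to the position within the
# selected columns, not the original file. Data is invented.
def _demo_usecols_index_col():
    from io import StringIO
    import pandas as pd

    data = "SecId,Time,Price\n10000,2013-05-11,100\n500,2013-05-12,101\n"
    df = pd.read_csv(StringIO(data), usecols=['Time', 'Price'],
                     parse_dates=['Time'], index_col=0)
    assert df.index.name == 'Time' and list(df.columns) == ['Price']
    return df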
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
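# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone check of the int64 boundary behaviour: values at the
# int64 limits parse as int64, values just outside fall back to another dtype
# (uint64, object or string, depending on the pandas version).
def _demo_int64_boundaries():
    from io import StringIO
    import numpy as np
    import pandas as pd

    i_max, i_min = np.iinfo(np.int64).max, np.iinfo(np.int64).min
    inside = pd.read_csv(StringIO("x\n%d\n%d\n" % (i_max, i_min)))
    assert inside['x'].dtype == np.int64
    outside = pd.read_csv(StringIO("x\n%d\n" % (i_max + 1)))
    assert outside['x'].dtype != np.int64
    return inside, outside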
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
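# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone thousands-separator example with invented data.
def _demo_thousands_separator():
    from io import StringIO
    import pandas as pd

    data = "A|B\n1|2,334.01\n10|13\n"
    df = pd.read_csv(StringIO(data), sep='|', thousands=',')
    assert df['B'].tolist() == [2334.01, 13.0]
    return df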
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen; this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
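# --- Illustrative sketch (added by the editor, not part of the original test
# suite). The same fixed-width file can be described either with explicit
# (start, end) colspecs or with field widths; both should parse identically.
def _demo_read_fwf():
    from io import StringIO
    import pandas as pd

    data = "201158  360.24\n201159  444.95\n"
    by_colspecs = pd.read_fwf(StringIO(data),
                              colspecs=[(0, 4), (4, 6), (6, 14)], header=None)
    by_widths = pd.read_fwf(StringIO(data), widths=[4, 2, 8], header=None)
    pd.testing.assert_frame_equal(by_colspecs, by_widths)
    return by_colspecs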
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
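# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone chunked-reading example: chunksize returns an iterator of
# DataFrames whose concatenation reproduces the full frame. Data is invented.
def _demo_chunked_read():
    from io import StringIO
    import pandas as pd

    data = "A,B\n1,2\n3,4\n5,6\n"
    chunks = list(pd.read_csv(StringIO(data), chunksize=2))
    assert len(chunks) == 2 and len(chunks[0]) == 2 and len(chunks[1]) == 1
    full = pd.concat(chunks, ignore_index=True)
    assert len(full) == 3
    return full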
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
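# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone skipfooter example; the option needs the python engine,
# which is requested explicitly here. Data is invented.
def _demo_skipfooter():
    from io import StringIO
    import pandas as pd

    data = "A,B,C\n1,2,3\n4,5,6\ntrailing footer line\n"
    df = pd.read_csv(StringIO(data), skipfooter=1, engine='python')
    assert len(df) == 2
    return df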
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulted in an error-reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
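# --- Illustrative sketch (added by the editor, not part of the original test
# suite). Standalone combination of names and a per-column dtype, kept to
# integer text so the cast is unambiguous across pandas versions. Data invented.
def _demo_dtype_and_names():
    from io import StringIO
    import numpy as np
    import pandas as pd

    data = "1 10\n2 20\n3 30\n"
    df = pd.read_csv(StringIO(data), sep=r'\s+', header=None,
                     names=['a', 'b'], dtype={'a': np.int32})
    assert df['a'].dtype == np.int32
    return df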
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
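# --- Illustrative sketch (added by the editor, not part of the original test
# suite). float_precision='round_trip' asks the C parser to reproduce exactly
# what Python's float() would give for the same text. Data is invented.
def _demo_float_precision():
    from io import StringIO
    import pandas as pd

    text = "a\n1.2345678901234567\n"
    val = pd.read_csv(StringIO(text), float_precision='round_trip')['a'].iloc[0]
    assert val == float("1.2345678901234567")
    return val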
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data)
import numpy as np
import pandas as pd
import pathlib
import os
###############################################################################
current_week = "30"
output_week = "/Users/christianhilscher/desktop/dynsim/output/week" + str(current_week) + "/"
pathlib.Path(output_week).mkdir(parents=True, exist_ok=True)
###############################################################################
input_path = "/Users/christianhilscher/Desktop/dynsim/input/"
output_path = "/Users/christianhilscher/Desktop/dynsim/output/"
dici_full = pd.read_pickle(output_path + "doc_full.pkl")
dici_est = pd.read_pickle(output_path + "doc_full2.pkl")
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import torch
import pandas as pd
import datetime
import numpy as np
import wandb
from sklearn import metrics
from pathlib import Path
from easydict import EasyDict as edict
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from built.trainer_base import TrainerBase
from built.inference import Inference
from built.builder import Builder
from built.checkpoint_manager import CheckpointManager
from built.ensembler import Ensembler
from built.utils.util_functions import *
from built.utils.kaggle_helper import dataset_initialize
from built.registry import Registry
from text2emospch.src.splitter.traintest_splitter import SentimentDataSplitter, EmotionDataSplitter
ex = Experiment('text2emotion-speech')
ex.captured_out_filter = apply_backspaces_and_linefeeds
@ex.config
def cfg():
description = 'Tweet Sentiment Classification'
@ex.main
def main(_run, _config):
config = edict(_config)
pprint.PrettyPrinter(indent=2).pprint(config)
@ex.command
def split(_run, _config):
config = edict(_config)
config = replace_placeholder(config, config)
print(
f'Split dataset into train and test by {config.splitter.params.ratio} ratio')
builder = Builder()
splitter = builder.build_splitter(config)
splitter.split()
@ex.command
def train(_run, _config):
config = edict(_config)
config = replace_placeholder(config, config)
pprint.PrettyPrinter(indent=2).pprint(config)
run = wandb.init(project=config.wandb.project.name,
group=config.wandb.group.name, reinit=True)
config.dataset.splits = []
config.dataset.splits.append(
{'train': True, 'split': 'train', 'csv_path': config.splitter.params.train_csv_path})
config.dataset.splits.append(
{'train': False, 'split': 'test', 'csv_path': config.splitter.params.test_csv_path})
if not os.path.exists(config.train.dir):
os.makedirs(config.train.dir)
builder = Builder()
trainer = TrainerBase(config, builder, run)
score = trainer.run()
print(f'score: {score}')
run.finish()
@ex.command
def inference_sentiment(_run, _config):
config = edict(_config)
config = replace_placeholder(config, config)
# test_path = 'text2emospch/input/tweet-sentiment-extraction/test.csv'
test_path = 'text2emospch/input/tweet-sentiment-extraction/scenario.csv'
config.dataset.splits = []
config.dataset.splits.append(
{'train': False, 'split': 'test', 'csv_path': test_path})
config.dataset.params.inference = True
builder = Builder()
model_path = os.path.join(config.train.dir, config.train.name)
inferencer = Inference(config, builder, model_path)
outputs = inferencer.predict()
# outputs = torch.sigmoid(outputs).cpu().detach().numpy().tolist()
outputs = np.argmax(outputs, axis=1)
test_df = pd.read_csv(test_path)
#####################################################
## PROGRAM TO IMPLEMENT KINESIS PRODUCER THAT FETCHES WEATHER INFORMATION
## AND STREAMS THE DATA INTO KINESIS STREAM
####################################################
# necessary imports
import boto3
import datetime as dt
import pandas as pd
import time
from pandas.core.tools import numeric
import requests
import json
import numpy as np
import math
# function to create a client with aws for a specific service and region
def create_client(service, region):
return boto3.client(
service,
region_name=region,
aws_access_key_id='XXXX', # replace with actual key
aws_secret_access_key='XXXX' # replace with actual key
# aws_session_token=<PASSWORD>
)
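# Illustrative usage of create_client (the service/region values below are
# assumptions for this sketch, not values taken from this script):
# >>> kinesis_client = create_client('kinesis', 'us-east-1')
# >>> # kinesis_client.put_record(StreamName=..., Data=..., PartitionKey=...)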
# function for generating new runtime to be used for timefield in ES
def get_date():
today = str(dt.datetime.today()) # get today as a string
year = today[:4]
month = today[5:7]
day = today[8:10]
hour = today[11:13]
minutes = today[14:16]
seconds = today[17:19]
# return a date string in the correct format for ES
return "%s/%s/%s %s:%s:%s" % (year, month, day, hour, minutes, seconds)
# function for generating new runtime to be used for timefield in ES
def format_date(date):
date = str(date) # get today as a string
year = date[:4]
month = date[5:7]
day = date[8:10]
# return a date string in the correct format for ES
return "%s/%s/%s" % (year, month, day)
def find_station_name(record, stations_info):
for _,station in stations_info.iterrows():
if station['id'] == record['station']:
return station['name']
# function to transform the data
def transform_data(data, stations_info):
# dates = data['cdatetime'] # get the datetime field
data = data.replace(np.nan, 0)
transformed = []
# loop over all records
for _, record in data.iterrows():
item = {}
# find station Id
station_name= find_station_name(record, stations_info)
# station = stations_info.loc[stations_info['name'] == record["station"]]
# print(station)
# print('*** station id', station['name'])
item['stationId'] = record['station']
item['stationName'] = station_name
if record['datatype'] == "PRCP":
item['precipitation'] = record['value']
item['snow'] = 0.0
item['minTemp'] = 0.0
item['maxTemp'] = 0.0
if record["datatype"] == "SNOW":
item['precipitation'] = 0.0
item['snow'] = record['value']
item['minTemp'] = 0.0
item['maxTemp'] = 0.0
if record["datatype"] == "TMIN":
item['precipitation'] = 0.0
item['snow'] = 0.0
item['minTemp'] = record['value']
item['maxTemp'] = 0.0
if record["datatype"] == "TMAX":
item['precipitation'] = 0.0
item['snow'] = 0.0
item['minTemp'] = 0.0
item['maxTemp'] = record['value']
item['observationDate'] = format_date(record['date']) # format as YYYY-MM-DD
item['insertedTimeStamp'] = get_date() # current timestamp
print('*** item**', item)
transformed.append(item)
# return the dataframe
return pd.DataFrame(transformed)
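# For reference, each transformed row produced above has this shape (station id,
# name and values are illustrative, not real observations):
# {'stationId': 'GHCND:USW00093721', 'stationName': 'SOME MD STATION',
#  'precipitation': 1.3, 'snow': 0.0, 'minTemp': 0.0, 'maxTemp': 0.0,
#  'observationDate': '2021/10/01', 'insertedTimeStamp': '2021/10/31 12:00:00'}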
# fetches weather information for MD stations for month of October for GHCND data
def fetch_weather_data():
datasetid = 'GHCND'
startdate = '2021-10-01'
enddate = '2021-10-31'
locationid= 'FIPS:24' # maryland
datatypeid = 'PRCP,SNOW,TEMP,TMAX,TMIN'
limit = 1000 # api restricts the data to 1000 rows for every call
offset = 0
baseUrl = "https://www.ncdc.noaa.gov/cdo-web/api/v2/data?datasetid={datasetid}&startdate={startdate}&enddate={enddate}&locationid={locationid}&includemetadata=true&units=metric&datatypeid={datatypeid}&limit={limit}&offset={offset}"
headers = {
'token': '<KEY>'
}
url = baseUrl.format(datasetid=datasetid, startdate = startdate, enddate = enddate, locationid = locationid, datatypeid = datatypeid, limit = limit, offset = offset)
response = requests.request("GET", url, headers=headers)
results = json.loads(response.text)
totalCount = results["metadata"]["resultset"]["count"]
dataFrame = pd.DataFrame(results["results"])
pagination = math.floor(totalCount/limit + 1)
# api limits the result set to 1000
# pagination to fetch the total count
for loop in range(1, pagination):
offset = 0
offset = offset+limit*loop + 1
url = baseUrl.format(datasetid=datasetid, startdate = startdate, enddate = enddate, locationid = locationid, datatypeid = datatypeid, limit = limit, offset = offset)
temp = json.loads((requests.request("GET", url, headers=headers)).text)
tempResults = pd.DataFrame(temp["results"])
import itertools
from collections.abc import Iterable, Sequence, Mapping
import numpy as np
import pandas as pd
class _VectorPlotter:
"""Base class for objects underlying *plot functions."""
semantics = ["x", "y"]
def establish_variables(self, data=None, **kwargs):
"""Define plot variables."""
x = kwargs.get("x", None)
y = kwargs.get("y", None)
if x is None and y is None:
self.input_format = "wide"
plot_data, variables = self.establish_variables_wideform(
data, **kwargs
)
else:
self.input_format = "long"
plot_data, variables = self.establish_variables_longform(
data, **kwargs
)
self.plot_data = plot_data
self.variables = variables
return plot_data, variables
def establish_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
Data can be a vector or mapping that is coerceable to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# TODO raise here if any kwarg values are not None,
# # if we decide for "structure-only" wide API
# First, determine if the data object actually has any data in it
empty = not len(data)
# Then, determine if we have "flat" data (a single vector)
# TODO extract this into a separate function?
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(data)
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame(columns=self.semantics)
variables = {}
elif flat:
# Coerce the data into a pandas Series such that the values
# become the y variable and the index becomes the x variable
# No other semantics are defined.
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data, name="y").copy()
flat_data.index.name = "x"
plot_data = flat_data.reset_index().reindex(columns=self.semantics)
orig_index = getattr(data, "index", None)
variables = {
"x": getattr(orig_index, "name", None),
"y": getattr(data, "name", None)
}
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
import logging
import os
import sys
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
import requests
import config
import imgkit
import seaborn as sns
import telegram
from requests_toolbelt import sessions
logging.basicConfig(
filename=config.LOG_DIR,
format="%(asctime)s - [%(levelname)s] - %(message)s",
level=logging.DEBUG,
)
def convert_timezone(date_obj, ref_tz, target_tz):
ref = pytz.timezone(ref_tz)
target = pytz.timezone(target_tz)
date_obj_aware = ref.localize(date_obj)
return date_obj_aware.astimezone(target)
def get_endpoint(endpoint, params=None):
with sessions.BaseUrlSession(base_url="https://api.covid19api.com/") as session:
try:
response = session.get(endpoint)
response.raise_for_status()
logging.info(f"Request successful for endpoint={endpoint}.")
except requests.exceptions.HTTPError as e:
logging.error(f"{e}. Retrying...")
response = get_endpoint(endpoint)
return response
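# Illustrative call (the 'summary' endpoint name and the 'Countries' key are
# assumptions about the covid19api.com API, not something defined here):
# >>> response = get_endpoint('summary')
# >>> countries = response.json().get('Countries', [])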
def parse_countries(countries, sort_key, ascending=True):
table = pd.DataFrame.from_dict(countries)
name = 'nfl_data_py'
import pandas
import numpy
import datetime
def import_pbp_data(years, columns=None, downcast=True):
"""Imports play-by-play data
Args:
years (List[int]): years to get PBP data for
columns (List[str]): only return these columns
downcast (bool): convert float64 to float32, default True
Returns:
DataFrame
"""
if not isinstance(years, (list, range)):
raise ValueError('Input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
plays = pandas.DataFrame()
url1 = r'https://github.com/nflverse/nflfastR-data/raw/master/data/play_by_play_'
url2 = r'.parquet'
for year in years:
try:
if len(columns) != 0:
data = pandas.read_parquet(url1 + str(year) + url2, columns=columns, engine='fastparquet')
else:
data = pandas.read_parquet(url1 + str(year) + url2, engine='fastparquet')
raw = pandas.DataFrame(data)
raw['season'] = year
if len(plays) == 0:
plays = raw
else:
plays = plays.append(raw)
print(str(year) + ' done.')
except:
print('Data not available for ' + str(year))
# converts float64 to float32, saves ~30% memory
if downcast:
cols = plays.select_dtypes(include=[numpy.float64]).columns
plays.loc[:, cols] = plays.loc[:, cols].astype(numpy.float32)
return plays
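# Illustrative usage (the column names are assumptions about the nflfastR data,
# not guaranteed by this module):
# >>> pbp = import_pbp_data([2020], columns=['play_id', 'posteam', 'epa'])
# >>> pbp.groupby('posteam')['epa'].mean()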
def import_weekly_data(years, columns=None, downcast=True):
"""Imports weekly player data
Args:
years (List[int]): years to get PBP data for
columns (List[str]): only return these columns
downcast (bool): convert float64 to float32, default True
Returns:
DataFrame
"""
if not isinstance(years, (list, range)):
raise ValueError('Input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/player_stats.parquet', engine='fastparquet')
data = data[data['season'].isin(years)]
if len(columns) > 0:
data = data[columns]
# converts float64 to float32, saves ~30% memory
if downcast:
cols = data.select_dtypes(include=[numpy.float64]).columns
data.loc[:, cols] = data.loc[:, cols].astype(numpy.float32)
return data
def import_seasonal_data(years, s_type='REG'):
if not isinstance(years, (list, range)):
raise ValueError('years input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if s_type not in ('REG','ALL','POST'):
raise ValueError('Only REG, ALL, POST allowed for s_type.')
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/player_stats.parquet', engine='fastparquet')
if s_type == 'ALL':
data = data[data['season'].isin(years)]
else:
data = data[(data['season'].isin(years)) & (data['season_type'] == s_type)]
pgstats = data[['recent_team', 'season', 'week', 'attempts', 'completions', 'passing_yards', 'passing_tds',
'passing_air_yards', 'passing_yards_after_catch', 'passing_first_downs',
'fantasy_points_ppr']].groupby(
['recent_team', 'season', 'week']).sum().reset_index()
pgstats.columns = ['recent_team', 'season', 'week', 'atts', 'comps', 'p_yds', 'p_tds', 'p_ayds', 'p_yac', 'p_fds',
'ppr_pts']
all_stats = data[
['player_id', 'player_name', 'recent_team', 'season', 'week', 'carries', 'rushing_yards', 'rushing_tds',
'rushing_first_downs', 'rushing_2pt_conversions', 'receptions', 'targets', 'receiving_yards', 'receiving_tds',
'receiving_air_yards', 'receiving_yards_after_catch', 'receiving_first_downs', 'receiving_epa',
'fantasy_points_ppr']].merge(pgstats, how='left', on=['recent_team', 'season', 'week']).fillna(0)
season_stats = all_stats.drop(['recent_team', 'week'], axis=1).groupby(
['player_id', 'player_name', 'season']).sum().reset_index()
season_stats['tgt_sh'] = season_stats['targets'] / season_stats['atts']
season_stats['ay_sh'] = season_stats['receiving_air_yards'] / season_stats['p_ayds']
season_stats['yac_sh'] = season_stats['receiving_yards_after_catch'] / season_stats['p_yac']
season_stats['wopr'] = season_stats['tgt_sh'] * 1.5 + season_stats['ay_sh'] * 0.8
season_stats['ry_sh'] = season_stats['receiving_yards'] / season_stats['p_yds']
season_stats['rtd_sh'] = season_stats['receiving_tds'] / season_stats['p_tds']
season_stats['rfd_sh'] = season_stats['receiving_first_downs'] / season_stats['p_fds']
season_stats['rtdfd_sh'] = (season_stats['receiving_tds'] + season_stats['receiving_first_downs']) / (
season_stats['p_tds'] + season_stats['p_fds'])
season_stats['dom'] = (season_stats['ry_sh'] + season_stats['rtd_sh']) / 2
season_stats['w8dom'] = season_stats['ry_sh'] * 0.8 + season_stats['rtd_sh'] * 0.2
season_stats['yptmpa'] = season_stats['receiving_yards'] / season_stats['atts']
season_stats['ppr_sh'] = season_stats['fantasy_points_ppr'] / season_stats['ppr_pts']
data.drop(['recent_team', 'week'], axis=1, inplace=True)
szn = data.groupby(['player_id', 'player_name', 'season', 'season_type']).sum().reset_index().merge(
data[['player_id', 'season', 'season_type']].groupby(['player_id', 'season']).count().reset_index().rename(
columns={'season_type': 'games'}), how='left', on=['player_id', 'season'])
szn = szn.merge(season_stats[['player_id', 'season', 'tgt_sh', 'ay_sh', 'yac_sh', 'wopr', 'ry_sh', 'rtd_sh',
'rfd_sh', 'rtdfd_sh', 'dom', 'w8dom', 'yptmpa', 'ppr_sh']], how='left',
on=['player_id', 'season'])
return szn
def see_pbp_cols():
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/play_by_play_2020.parquet', engine='fastparquet')
cols = data.columns
return cols
def see_weekly_cols():
data = pandas.read_parquet(r'https://github.com/nflverse/nflfastR-data/raw/master/data/player_stats.parquet', engine='fastparquet')
cols = data.columns
return cols
def import_rosters(years, columns=None):
if not isinstance(years, (list, range)):
raise ValueError('years input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
if columns is None:
columns = []
rosters = []
for y in years:
temp = pandas.read_csv(r'https://github.com/mrcaseb/nflfastR-roster/blob/master/data/seasons/roster_' + str(y)
+ '.csv?raw=True', low_memory=False)
rosters.append(temp)
rosters = pandas.DataFrame(pandas.concat(rosters)).rename(
columns={'full_name': 'player_name', 'gsis_id': 'player_id'})
rosters.drop_duplicates(subset=['season', 'player_name', 'position', 'player_id'], keep='first', inplace=True)
if len(columns) > 0:
rosters = rosters[columns]
def calc_age(x):
ca = pandas.to_datetime(x[0])
bd = pandas.to_datetime(x[1])
return ca.year - bd.year + numpy.where(ca.month > bd.month, 0, -1)
if 'birth_date' in columns and 'current_age' in columns:
rosters['current_age'] = rosters['season'].apply(lambda x: datetime.datetime(int(x), 9, 1))
rosters['age'] = rosters[['current_age', 'birth_date']].apply(calc_age, axis=1)
rosters.drop(['current_age'], axis=1, inplace=True)
rosters.dropna(subset=['player_id'], inplace=True)
return rosters
def import_team_desc():
df = pandas.read_csv(r'https://github.com/nflverse/nflfastR-data/raw/master/teams_colors_logos.csv')
return df
def import_schedules(years):
if not isinstance(years, (list, range)):
raise ValueError('Input must be list or range.')
if min(years) < 1999:
raise ValueError('Data not available before 1999.')
scheds = pandas.DataFrame()
for x in years:
try:
temp = pandas.read_csv(r'https://raw.githubusercontent.com/cooperdff/nfl_data_py/main/data/schedules//' + str(x) + '.csv').drop('Unnamed: 0', axis=1)
scheds = scheds.append(temp)
except:
print('Data not available for ' + str(x))
return scheds
def import_win_totals(years):
if not isinstance(years, (list, range)):
raise ValueError('years variable must be list or range.')
df = pandas.read_csv(r'https://raw.githubusercontent.com/nflverse/nfldata/master/data/win_totals.csv')
"""
system.py
Handles the system class for openMM
"""
# Global imports
import openmm
import openmm.app
from simtk import unit
import numpy as np
import pandas
import sklearn.decomposition
import configparser
import prody
import scipy.spatial.distance as sdist
from . import utils
__author__ = '<NAME>'
__version__ = '0.2'
__location__ = openmm.os.path.realpath(
openmm.os.path.join(openmm.os.getcwd(), openmm.os.path.dirname(__file__)))
_ef = 1 * unit.kilocalorie / unit.kilojoule # energy scaling factor
_df = 1 * unit.angstrom / unit.nanometer # distance scaling factor
_af = 1 * unit.degree / unit.radian # angle scaling factor
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from <NAME>"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
class SystemData:
"""
A class to store the system information, including atoms, coordinates and topology
"""
def __init__(self, atoms, bonds=None, angles=None, dihedrals=None, impropers=None):
self.atoms = atoms
self.atoms.index = np.arange(1, len(self.atoms) + 1)
self.masses = atoms[['type', 'mass']].drop_duplicates()
self.masses.index = np.arange(1, len(self.masses) + 1)
self.n_atoms = len(self.atoms)
self.n_atomtypes = len(self.masses)
if bonds is not None:
self.bonds = bonds
self.bonds.index = np.arange(1, len(self.bonds) + 1)
self.bondtypes = bonds[['type', 'x0', 'k']].drop_duplicates()
self.bondtypes.index = np.arange(1, len(self.bondtypes) + 1)
self.n_bonds = len(self.bonds)
self.n_bondtypes = len(self.bondtypes)
else:
self.bonds = pandas.DataFrame()
self.bondtypes = pandas.DataFrame()
self.n_bonds = 0
self.n_bondtypes = 0
if angles is not None:
self.angles = angles
self.angles.index = np.arange(1, len(self.angles) + 1)
self.angletypes = angles[['type', 'x0', 'k']].drop_duplicates()
self.angletypes.index = np.arange(1, len(self.angletypes) + 1)
self.n_angles = len(self.angles)
self.n_angletypes = len(self.angletypes)
else:
self.angles = pandas.DataFrame()
from utils.qSLP import qSLP
from qiskit.utils import QuantumInstance
from qiskit import Aer, QuantumCircuit
from utils.data_visualization import *
from utils.Utils_pad import padding
from utils.import_data import get_dataset
from qiskit.circuit.library import ZZFeatureMap, ZFeatureMap
from qiskit.circuit.library import RealAmplitudes
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, execute, BasicAer
import pickle
from utils.Utils import get_params, parity
from sklearn.metrics import accuracy_score
import pandas as pd
import sys
def get_quantum_instance():
IBMQ.load_account() # Load account from disk
provider = IBMQ.get_provider(hub='ibm-q')
small_devices = provider.backends(filters=lambda x: x.configuration().n_qubits == 5
and not x.configuration().simulator
and x.status().operational == True)
least_busy(small_devices)
backend = least_busy(small_devices)
# Comment to run on real devices
# backend = Aer.get_backend('aer_simulator')
return QuantumInstance(backend, shots=1024)
def main(path_results, path_models, path_save):
path_res = path_results
datasets = ["iris01","MNIST09", "MNIST38", "iris12", "iris02"]
for dataset in datasets:
qinstance = get_quantum_instance()
X_train, X_test, Y_train, Y_test = get_dataset(dataset)
X_test_pad = padding(X_test)
for d in range(1,4):
# Create model
model_name = f"pad_qSLP_{d}"
print(model_name)
params = get_params(model_name, dataset)
model = qSLP(d, True)
qc, sp_par, ansatz_par = model.get_full_circ()
# Set params
weights = dict(zip(ansatz_par, params))
qc = qc.bind_parameters(weights)
ris = []
# Execute tests
for i in range(X_test.shape[0]):
inp = dict(zip(sp_par, X_test_pad[i]))
q = qc.bind_parameters(inp)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(res.get_counts())
# Process and save results
ris = [int(max(el, key=el.get)) for el in ris]
acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model
model_name = f"sdq_qSLP_{d}"
print(model_name)
params = get_params(model_name, dataset)
model = qSLP(d, False)
qc, sp_par, ansatz_par = model.get_full_circ()
# Set params
weights = dict(zip(ansatz_par, params))
qc = qc.bind_parameters(weights)
ris = []
# Execute circuit
for i in range(X_test.shape[0]):
inp = dict(zip(sp_par, X_test[i]))
q = qc.bind_parameters(inp)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(res.get_counts())
# Process and save results
ris = [int(max(el, key=el.get)) for el in ris]
acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model qnnC_v1
model_name = "qNNC_v1"
print(model_name)
tot_qubit = 2
feature_map = ZZFeatureMap(feature_dimension=2,
reps=1, entanglement='linear')
ansatz = RealAmplitudes(2, reps=1)
interpreter = parity
qc = QuantumCircuit(tot_qubit)
qc.append(feature_map, range(tot_qubit))
qc.append(ansatz, range(tot_qubit))
qc.measure_all()
params = get_params(model_name, dataset)
weights = dict(zip(ansatz.parameters, params))
qc = qc.bind_parameters(weights)
ris = []
for i in range(X_test.shape[0]):
weigths = dict(zip(feature_map.parameters, X_test[i]))
q = qc.bind_parameters(weigths)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(max(res.get_counts(), key=res.get_counts().get).count('1') % 2)
acc = accuracy_score(ris, Y_test)
#acc = accuracy_score([max(el, key=el.get).count('1') % 2 for el in ris], Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model qnnC_v2
model_name = "qNNC_v2"
print(model_name)
tot_qubit = 2
feature_map = ZFeatureMap(feature_dimension=2,
reps=1)
ansatz = RealAmplitudes(2, reps=2)
interpreter = parity
qc = QuantumCircuit(tot_qubit)
qc.append(feature_map, range(tot_qubit))
qc.append(ansatz, range(tot_qubit))
qc.measure_all()
params = get_params(model_name, dataset)
weights = dict(zip(ansatz.parameters, params))
qc = qc.bind_parameters(weights)
ris = []
for i in range(X_test.shape[0]):
weigths = dict(zip(feature_map.parameters, X_test[i]))
q = qc.bind_parameters(weigths)
res = execute(q, qinstance.backend, shots=1024).result()
ris.append(max(res.get_counts(), key=res.get_counts().get).count('1') % 2)
acc = accuracy_score(ris, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
res.to_csv(path_save, mode = "a", header=False, index = False)
# Create model QSVC
model_name = "QSVC"
print(model_name)
best_df = pd.read_csv("results/test_simulation/simulated_best.csv")
best_qsvc = best_df[best_df["model"] == model_name]
k = best_qsvc[best_qsvc["dataset"] == dataset]["k"].item()
loaded_model = pickle.load(open(f"results/training/qsvm/{model_name}_{dataset}_{k}.sav", 'rb'))
rus = loaded_model.predict(X_test)
acc = accuracy_score(rus, Y_test)
result = {
"model": [model_name],
"real_dev_score" : [acc]
}
res = pd.DataFrame(result)
#Find which objects are flagged as bad or low based on various cuts through the data. Output this as a dataframe containing True/False for every object in every line
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from astropy.stats import biweight_midvariance
from matplotlib.font_manager import FontProperties
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
qualout = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#The location with the file for all of our data
outpath = '/Users/blorenz/COSMOS/COSMOSData/lineflux_red.txt'
#Path for outdated avs, computed using balmer ratio
avpath = '/Users/blorenz/COSMOS/COSMOSData/balmer_avs.txt'
av_df = ascii.read(avpath).to_pandas()
#The location to store the scale and its stddev of each line
scaledata = '/Users/blorenz/COSMOS/COSMOSData/scales.txt'
#Read in the scale of the lines
#scale_df = ascii.read(scaledata).to_pandas()
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
#File with the error array
errreddatapath = '/Users/blorenz/COSMOS/COSMOSData/errs_red.txt'
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
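# divz is a guarded element-wise division: wherever Y == 0 the result is 0
# instead of inf/nan. Small illustrative check:
# >>> divz(np.array([1.0, 2.0]), np.array([0.0, 4.0]))
# array([0. , 0.5])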
#Check if bpt correlates with stellar mass
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'
#Read in the muzzin data
mdata = ascii.read(mdatapath).to_pandas()
mdata = mdata.rename(columns={'ID':'OBJID'})
fluxdata = pd.merge(fluxdata, mdata)
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from math_helpers.constants import *
from traj import lambert
from traj.meeus_alg import meeus
from traj.conics import get_rv_frm_elements
from traj.bplane import bplane_vinf
import pandas as pd
from math_helpers.time_systems import get_JD, cal_from_jd
from math_helpers import matrices as mat
from math_helpers.vectors import vcrossv
def launchwindows(departure_planet, departure_date, arrival_planet,
arrival_window, dm=None, center='sun', run_test=False):
"""return plots of c3 and vinf values for a 0 rev lambert transfer
between a departure and arrival planet within a given arrival window.
:param departure_planet: name of departure planet (str)
:param departure_date: date of departure ('yyyy-mm-dd')
:param arrival_planet: name of arrival planet (str)
:param arrival_window: list with begin and end arrival date window
['yyyy-mm-dd', 'yyyy-mm-dd']
:param dm: direction of motion (optional); if None, then the script
will auto-compute direction based on the change in true
anomaly
:param center: point where both planets are orbiting about;
default = 'sun'
:param run_test: run unit tests with lower days and bypass plots
"""
# reinitializing
dep_date = departure_date
dp = departure_planet
ap = arrival_planet
# get departure julian dates
dep_date = pd.to_datetime(dep_date)
dep_JD = get_JD(dep_date.year, dep_date.month, dep_date.day, \
dep_date.hour, dep_date.minute, dep_date.second)
days = 1000
if run_test:
days = 300
# get arrival windows
arrival_window = pd.to_datetime(arrival_window)
arrival_window = np.linspace(arrival_window[0].value, arrival_window[1].value, days)
arrival_window = pd.to_datetime(arrival_window)
# get state of departure planet
dep_elements = meeus(dep_JD, planet=dp)
s_dep_planet = get_rv_frm_elements(dep_elements, center=center, method='sma')
r_dep_planet = s_dep_planet[:3]
v_dep_planet = s_dep_planet[3:6]
# initializing arrival dataframe
columns = ['tof_d', 'TOF', 'v_inf_dep', 'v_inf_arr', 'c3', 'vinf']
transfer = pd.DataFrame(index=arrival_window, columns=columns)
for date in arrival_window:
transfer['tof_d'][date] = date.date()
# get state of arrival planet at the current arrival date
arrival_jdate = get_JD(date.year, date.month, date.day, \
date.hour, date.minute, date.second)
arr_elements = meeus(arrival_jdate, planet=ap)
s_arrival = get_rv_frm_elements(arr_elements, center=center, method='sma')
r_arr_planet = s_arrival[:3]
v_arr_planet = s_arrival[3:6]
# convert date since departure to seconds
transfer['TOF'][date] = (date - dep_date).total_seconds()
# compute lambert solution at current arrival date
vi, vf = lambert.lambert_univ(r_dep_planet, r_arr_planet, \
transfer['TOF'][date], dm=dm,
center=center,
dep_planet=dp, arr_planet=ap)
# compute hyperbolic departure/arrival velocities
transfer['v_inf_dep'][date] = vi - v_dep_planet
transfer['v_inf_arr'][date] = vf - v_arr_planet
# compute c3 values at departure and v_inf values at arrival
transfer['c3'][date] = norm(transfer['v_inf_dep'][date])**2
transfer['vinf'][date] = norm(transfer['v_inf_arr'][date])
# get values and dates of min c3/v_inf
minc3 = transfer['c3'].min()
minc3_date = transfer['TOF'][transfer['c3'] == minc3]
minvinf = transfer['vinf'].min()
minvinf_date = transfer['TOF'][transfer['vinf'] == minvinf]
print(f'(a) min c3 = {minc3} km2/s2 on {minc3_date.index[0]}'
f' // {transfer.loc[transfer["c3"] == minc3, "tof_d"][0]}')
print(f'(b) min v_inf = {minvinf} km/s on {minvinf_date.index[0]}'
f' // {transfer.loc[transfer["vinf"] == minvinf, "tof_d"][0]}')
if run_test:
return minc3, minvinf, \
str(minc3_date.index[0])[:10], str(minvinf_date.index[0])[:10]
# # assuming positions of planets are in the ecliptic,
# # determine Type 1 or 2 transfer
tanom1 = np.arctan2(r_dep_planet[1], r_dep_planet[0])
tanom2 = np.arctan2(r_arr_planet[1], r_arr_planet[0])
dtanom = tanom2 - tanom1
if dtanom > np.pi:
ttype = '2'
elif dtanom < np.pi:
ttype = '1'
# plots
fig=plt.figure(figsize=(12,6))
plt.style.use('seaborn')
# c3 vs tof
ax=fig.add_subplot(121)
ax.set_xlabel(f"days past departure ({departure_date})")
ax.set_ylabel("c3, km2/s2")
ax.set_title(f"c3 versus time of flight, Type {ttype}")
ax.plot(transfer['TOF']/3600/24, transfer['c3'], label='departure_c3')
ax.plot(minc3_date.values/3600/24, minc3, 'bo', markersize=12, label='min_c3')
# v_inf vs tof
ax2=fig.add_subplot(122)
ax2.set_xlabel(f"days past departure ({departure_date})")
ax2.set_ylabel(f"v_inf at {ap}, km/s")
ax2.set_title(f"v_inf at {ap} versus time of flight, Type {ttype}")
ax2.plot(transfer['TOF']/3600/24, transfer['vinf'], label='arrival_vinf')
ax2.plot(minvinf_date.values/3600/24, minvinf, 'ro', markersize=12, label='min_vinf')
ax.legend()
ax2.legend()
fig.tight_layout(pad=4.0)
plt.show()
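# Illustrative call (the planets and dates below are made-up examples, not
# mission values used elsewhere in this module):
# launchwindows('earth', '2025-01-01', 'mars', ['2025-08-01', '2026-08-01'])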
def get_porkchops(dep_jd_init, dep_jd_fin, arr_jd_init, arr_jd_fin,
dp='earth', ap='jupiter', center='sun',
contour_tof=None, contour_c3=None,
contour_vinf=None, contour_vinf_out=None,
plot_tar=False, tar_dep=None, tar_arr=None,
shade_c3=False, shade_tof=False, shade_vinf_arr=False,
shade_vinf_range=None, shade_tof_range=None, fine_search=True):
"""generates a porkchop plot for a given launch and arrival window.
:param dep_jd_init: initial departure date (JD)
:param dep_jd_fin: final departure date (JD)
:param arr_jd_init: initial arrival date (JD)
:param arr_jd_fin: final arrival date (JD)
:param dp: departure planet
:param ap: arrival planet
:param center: center body of orbit; default='sun'
:param contour_tof: array of tof contours to plot
:param contour_c3: array of launch c3 contours to plot (optional)
:param contour_vinf: array of vinf inbound contours to plot
:param contour_vinf_out: array of vinf outbound contours to plot (optional)
:param plot_tar: plot target point (True); default=False
:param tar_dep: target departure date (JD)
:param tar_arr: target arrival date (JD)
:param shade_c3: option to shade certain c3 contours (True)
:param shade_tof: option to shade certain tof contours (True)
:param shade_vinf_arr: option to shade certain arrival vinf contours (True)
:param shade_vinf_range: array of arrival vinf range to shade
:param shade_tof_range: array of time of flight range to shade
:return df: if contour_c3 is present, [df_tof, df_c3, df_vinf_arr];
if contour_vinf_out is present,
[df_tof, df_vinf_dep, df_vinf_arr]
"""
plot_c3 = True
plot_vinf_out = True
if contour_c3 is None:
plot_c3 = False
if contour_vinf_out is None:
plot_vinf_out = False
if tar_dep is None:
pass
else:
print('segment tof (days):',tar_arr-tar_dep)
print('target departure (cal):', cal_from_jd(tar_dep, rtn='string'), '(jd)', tar_dep)
print('target arrival (cal):', cal_from_jd(tar_arr, rtn='string'), '(jd)', tar_arr)
# departure and arrival dates
dep_date_initial_cal = cal_from_jd(dep_jd_init, rtn='string')
arr_date_initial_cal = cal_from_jd(arr_jd_init, rtn='string')
dep_date_initial_cal = pd.to_datetime(dep_date_initial_cal)
arr_date_initial_cal = pd.to_datetime(arr_date_initial_cal)
# time windows
delta_dep = dep_jd_fin - dep_jd_init
delta_arr = arr_jd_fin - arr_jd_init
if fine_search:
delta = 1
else:
delta = 5
departure_window = np.linspace(dep_jd_init, dep_jd_fin, int(delta_dep/delta))
arrival_window = np.linspace(arr_jd_init, arr_jd_fin, int(delta_arr/delta))
# generate dataframes for c3, time of flight, and dep/arrival v_inf
df_c3 = pd.DataFrame(index=arrival_window, columns=departure_window)
df_tof = pd.DataFrame(index=arrival_window, columns=departure_window)
df_vinf_arr = pd.DataFrame(index=arrival_window, columns=departure_window)
df_vinf_dep = pd.DataFrame(index=arrival_window, columns=departure_window)
# loop through launch dates
for dep_JD in departure_window:
for arr_JD in arrival_window:
tof_s = (arr_JD-dep_JD)*3600*24
s_planet1 = meeus(dep_JD, planet=dp, rtn='states', ref_rtn=center)
s_planet2 = meeus(arr_JD, planet=ap, rtn='states', ref_rtn=center)
vi, vf = lambert.lambert_univ(s_planet1[:3], s_planet2[:3], tof_s,
center=center, dep_planet=dp, arr_planet=ap)
c3 = norm(vi-s_planet1[3:6])**2
vinf_arr = norm(vf - s_planet2[3:6])
vinf_dep = norm(vi - s_planet1[3:6])
df_c3[dep_JD][arr_JD] = c3
df_tof[dep_JD][arr_JD] = arr_JD-dep_JD
df_vinf_arr[dep_JD][arr_JD] = vinf_arr
df_vinf_dep[dep_JD][arr_JD] = vinf_dep
# generate contour plots
fig, ax = plt.subplots(figsize=(10,8))
plt.style.use('default')
CS_tof = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_tof, linewidths=0.5, colors=('gray'), levels=contour_tof)
CS_vinf_arr = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_vinf_arr, linewidths=0.5, colors=('g'), levels=contour_vinf)
if plot_vinf_out:
CS_vinf_dep = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_vinf_dep, linewidths=0.5, colors=('b'), levels=contour_vinf_out)
if plot_c3:
CS_c3 = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_c3, linewidths=0.5, colors=('b'), levels=contour_c3)
ax.set_title(f'pork chop plot from {dp} to {ap}')
ax.set_xlabel(f'{dp} departure dates - days since {dep_date_initial_cal}')
ax.set_ylabel(f'{ap} arrival dates - days since {arr_date_initial_cal}')
ax.clabel(CS_tof, inline=0.2, fmt="%.0f", fontsize=10)
ax.clabel(CS_vinf_arr, inline=0.2, fmt="%.1f", fontsize=10)
h1,_ = CS_tof.legend_elements()
h3,_ = CS_vinf_arr.legend_elements()
if plot_c3:
ax.clabel(CS_c3, inline=0.2, fmt="%.1f", fontsize=10)
h2,_ = CS_c3.legend_elements()
ax.legend([h1[0], h2[0], h3[0]], ['TOF, days', 'c3, km2/s2', 'v_inf_arrival, km/s'],
loc=2, facecolor='white', framealpha=1)
elif plot_vinf_out:
ax.clabel(CS_vinf_dep, inline=0.2, fmt="%.1f", fontsize=10)
h2,_ = CS_vinf_dep.legend_elements()
ax.legend([h1[0], h2[0], h3[0]], ['TOF, days', 'vinf_departure, km/s', 'v_inf_arrival, km/s'],
loc=2, facecolor='white', framealpha=1)
if plot_tar:
plt.scatter(tar_dep-dep_jd_init, tar_arr-arr_jd_init, linewidths=18, color='orange')
# shade region within these bounds
if shade_vinf_arr:
CS_vinf_arr = ax.contourf(departure_window-departure_window[0],
arrival_window-arrival_window[0], df_vinf_arr,
colors=('g'), levels=shade_vinf_range, alpha=0.3)
if shade_tof:
CS_tof = ax.contourf(departure_window-departure_window[0],
arrival_window-arrival_window[0], df_tof,
colors=('black'), levels=shade_tof_range, alpha=0.3)
plt.savefig(f'porkschops_{dp}_{ap}.png')
plt.show()
if plot_c3:
return [df_tof, df_c3, df_vinf_arr]
elif plot_vinf_out:
return [df_tof, df_vinf_dep, df_vinf_arr]
else:
return [df_tof, df_vinf_arr]
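# Illustrative call (the Julian dates and contour levels below are placeholders,
# not values used elsewhere in this module):
# dfs = get_porkchops(2459000, 2459100, 2459400, 2459600,
#                     dp='earth', ap='jupiter',
#                     contour_tof=np.arange(300, 601, 50),
#                     contour_c3=np.arange(80, 161, 10),
#                     contour_vinf=np.arange(5, 11, 1))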
def run_pcp_search(dep_jd_init, dep_jd_fin, pl2_jd_init, pl2_jd_fin, pl3_jd_init, pl3_jd_fin,
dpl='earth', pl2='jupiter', pl3='pluto', center='sun',
c3_max=None, vinf_max=None, vinf_tol=None, rp_min=None, fine_search=False):
"""generates a porkchop plot for a given launch and arrival window.
:param dep_jd_init: initial departure date of launch planet (planet 1) (JD)
:param dep_jd_fin: final departure date of launch planet (planet 1) (JD)
:param pl2_jd_init: initial arrival date of flyby planet (planet 2) (JD)
:param pl2_jd_fin: final arrival date of flyby planet (planet 2) (JD)
:param pl3_jd_init: initial arrival date of arrival planet (planet 3) (JD)
:param pl3_jd_fin: final arrival date of arrival planet (planet 3) (JD)
:param dpl: name of departure planet (planet 1)
:param pl2: name of flyby planet (planet 2)
:param pl3: name of arrival planet (planet 3)
:param center: center body of orbit; default='sun'
:param c3_max: maximum launch c3 constraint (km2/s2)
:param vinf_max: maximum final arrival vinf at planet 3 (km/s)
:param vinf_tol: maximum allowable delta-vinf inbound/outbound of flyby (km/s)
:param rp_min: minimum radius of flyby (km)
:param fine_search: option between coarse search of 3 days interval (False);
or fine search of 0.8 days interval (True)
:return df: [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, ...
dfpl3_tof, dfpl3_vinf_in, dfpl3_rp]
Note: this function is a work in progress; the constraint handling still needs to be made more robust.
"""
# departure and arrival dates
dep_date_init_cal = pd.to_datetime(cal_from_jd(dep_jd_init, rtn='string'))
pl2_jd_init_cal = pd.to_datetime(cal_from_jd(pl2_jd_init, rtn='string'))
pl3_jd_init_cal = pd.to_datetime(cal_from_jd(pl3_jd_init, rtn='string'))
# time windows
delta_dep = dep_jd_fin - dep_jd_init
delta_pl2 = pl2_jd_fin - pl2_jd_init
delta_pl3 = pl3_jd_fin - pl3_jd_init
searchint = 3
if fine_search:
searchint = 0.8
dep_window = np.linspace(dep_jd_init, dep_jd_fin, int(delta_dep/searchint))
# print(dep_window)
pl2_window = np.linspace(pl2_jd_init, pl2_jd_fin, int(delta_pl2/searchint))
pl3_window = np.linspace(pl3_jd_init, pl3_jd_fin, int(delta_pl3/searchint))
# generate dataframes for c3, time of flight, and dep/arrival v_inf
dfpl1_c3 = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl2_tof = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl2_vinf_in = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl2_vinf_out = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl3_tof = pd.DataFrame(index=pl3_window, columns=pl2_window)
dfpl3_vinf_in = pd.DataFrame(index=pl3_window, columns=pl2_window)
dfpl3_rp = pd.DataFrame(index=pl3_window, columns=pl2_window)
# loop through launch dates
for dep_JD in dep_window:
for arr_JD in pl2_window:
tof12_s = (arr_JD-dep_JD)*3600*24
s_planet1 = meeus(dep_JD, planet=dpl, rtn='states', ref_rtn=center)
s_planet2 = meeus(arr_JD, planet=pl2, rtn='states', ref_rtn=center)
vi_seg1, vf_seg1 = lambert.lambert_univ(s_planet1[:3], s_planet2[:3], tof12_s,
center=center, dep_planet=dpl, arr_planet=pl2)
c3 = norm(vi_seg1-s_planet1[3:6])**2
if c3 < c3_max:
# print('c3', c3)
for arr2_JD in pl3_window:
tof23_s = (arr2_JD-arr_JD)*3600*24
s_planet3 = meeus(arr2_JD, planet=pl3, rtn='states', ref_rtn=center)
vi_seg2, vf_seg2 = lambert.lambert_univ(s_planet2[:3], s_planet3[:3], tof23_s,
center=center, dep_planet=pl2, arr_planet=pl3)
vinf_pl2_in = norm(vf_seg1 - s_planet2[3:6])
vinf_pl2_out = norm(vi_seg2 - s_planet2[3:6])
if abs(vinf_pl2_in-vinf_pl2_out) < vinf_tol:
# print(abs(vinf_pl2_in-vinf_pl2_out))
rp = bplane_vinf(vf_seg1, vi_seg2, center=pl2, rtn_rp=True)
if rp > rp_min:
# print('rp', rp)
vinf_pl3_in = norm(vf_seg2 - s_planet3[3:6])
if vinf_pl3_in < vinf_max:
# print('vinf_pl2_out', vinf_pl2_out)
dfpl1_c3[dep_JD][arr_JD] = c3
dfpl2_tof[dep_JD][arr_JD] = arr_JD-dep_JD
dfpl2_vinf_in[dep_JD][arr_JD] = vinf_pl2_in
dfpl2_vinf_out[dep_JD][arr_JD] = vinf_pl2_out
dfpl3_tof[arr_JD][arr2_JD] = arr2_JD-arr_JD
dfpl3_vinf_in[arr_JD][arr2_JD] = vinf_pl3_in
dfpl3_rp[arr_JD][arr2_JD] = rp
return [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl3_tof, dfpl3_vinf_in, dfpl3_rp]
def search_script_multi(dep_windows, planets, center, constraints, fine_search=False):
dep_windows_cal = []
arr_windows_cal = []
# departure and arrival dates
for window in dep_windows:
dep_windows_cal.append([pd.to_datetime(cal_from_jd(jd, rtn='string')) for jd in window])
# time windows
windows = []
searchint = 3
if fine_search:
searchint = 0.8
delta_deps = [depf - depi for depi, depf in dep_windows]
for delta, window in zip(delta_deps, dep_windows):
windows.append(np.linspace(window[0], window[1], int(delta/searchint)))
dfs = []
# generate dataframes for c3, time of flight, and dep/arrival v_inf
dfpl1_c3 = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl2_tof = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl2_vinf_in = pd.DataFrame(index=windows[1], columns=windows[0])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in]
if len(planets) >= 3:
dfpl2_vinf_out = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl2_rp = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl3_tof = pd.DataFrame(index=windows[2], columns=windows[1])
dfpl3_vinf_in = pd.DataFrame(index=windows[2], columns=windows[1])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl2_rp,
dfpl3_tof, dfpl3_vinf_in]
if len(planets) >= 4:
dfpl3_vinf_out = pd.DataFrame(index=windows[2], columns=windows[1])
dfpl3_rp = pd.DataFrame(index=windows[2], columns=windows[1])
dfpl4_tof = pd.DataFrame(index=windows[3], columns=windows[2])
dfpl4_vinf_in = pd.DataFrame(index=windows[3], columns=windows[2])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl2_rp,
dfpl3_tof, dfpl3_vinf_in, dfpl3_vinf_out, dfpl3_rp,
dfpl4_tof, dfpl4_vinf_in]
if len(planets) >= 5:
dfpl4_vinf_out = pd.DataFrame(index=windows[3], columns=windows[2])
dfpl4_rp = pd.DataFrame(index=windows[3], columns=windows[2])
dfpl5_tof = pd.DataFrame(index=windows[4], columns=windows[3])
dfpl5_vinf_in = pd.DataFrame(index=windows[4], columns=windows[3])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl2_rp,
dfpl3_tof, dfpl3_vinf_in, dfpl3_vinf_out, dfpl3_rp,
dfpl4_tof, dfpl4_vinf_in, dfpl4_vinf_out, dfpl4_rp,
dfpl5_tof, dfpl5_vinf_in]
if len(planets) == 6:
dfpl5_vinf_out = pd.DataFrame(index=windows[4], columns=windows[3])
dfpl5_rp = pd.DataFrame(index=windows[4], columns=windows[3])
dfpl6_tof = pd.DataFrame(index=windows[5], columns=windows[4])
dfpl6_vinf_in = pd.DataFrame(index=windows[5], columns=windows[4])
# BookNLP LitBank
import pandas as pd
import csv
import os
# import own script
from hyphens import *
from check_inconsistencies import *
books_mapping = {'AliceInWonderland': '11_alices_adventures_in_wonderland',
'DavidCopperfield': '766_david_copperfield',
'Dracula': '345_dracula',
'Emma': '158_emma',
'Frankenstein': '84_frankenstein_or_the_modern_prometheus',
'HuckleberryFinn': '76_adventures_of_huckleberry_finn',
'MobyDick': '2489_moby_dick',
'OliverTwist': '730_oliver_twist',
'PrideAndPrejudice': '1342_pride_and_prejudice',
'TheCallOfTheWild': '215_the_call_of_the_wild',
'Ulysses': '4300_ulysses',
'VanityFair': '599_vanity_fair'}
directory = os.fsencode('/mnt/book-nlp/data/tokens/overlap/')
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".tokens"):
booknlp_filepath = "/mnt/book-nlp/data/tokens/overlap/" + filename
litbank_filepath = "/mnt/data/gold_standard/overlap/litbank/" + books_mapping.get(str(filename.replace('.tokens',''))) + ".tsv"
#####################################
# get output file BookNLP
current_file = pd.read_csv(booknlp_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=["originalWord","ner"])
import pandas as pd
from koapy import KiwoomOpenApiContext
from koapy.backend.cybos.CybosPlusComObject import CybosPlusComObject
kiwoom = KiwoomOpenApiContext()
cybos = CybosPlusComObject()
kiwoom.EnsureConnected()
cybos.EnsureConnected()
kiwoom_codes = kiwoom.GetCommonCodeList()
cybos_codes = cybos.GetCommonCodeList()
cybos_codes = [code[1:] for code in cybos_codes]
kiwoom_codes = pd.DataFrame(kiwoom_codes, columns=['code'])
kiwoom_codes['kiwoom'] = 'TRUE'
cybos_codes = pd.DataFrame(cybos_codes, columns=['code'])
""" Functions used in create-documentation notebook"""
import os
import pandas as pd
import numpy as np
button = ":raw-html:`❏`"
csv_header = "\n{}`\n"
csv_entry = ".. csv-table::"
csv_columns = " :header: {}{}\n"
csv_delim = " :delim: |"
csv_row = " {} | {}"
csv_singlerow = " {}"
bool_entry = ":raw-html:`❏` – {}\n"
open_question = "\n{} \n"
header_question = "\n{}\n"
insert_image = "\n.. image:: {}"
empty_field = ':raw-html:`<form><input type="text" id="fname" name="fname"><br></form>`'
def create_pages(
codebook,
waveid,
lanid,
q_ids,
q_filter,
q_groups,
q_layout,
q_text,
q_sub_text,
q_categories,
target_dir,
image_path,
):
""" Create reStructuredText files for all groups specified in q_groups. Each file holds all questions that belong to a respective group.
"""
qids = list(codebook[q_ids].unique())
data = codebook.copy()
data = data.set_index([codebook.index, q_ids])
for idx, qid in enumerate(qids):
df = data[data.index.get_level_values(q_ids) == qid]
# Create rst-file.
file_name = f"{waveid}{lanid}-{qid}"
group_name = df.loc[df.index[0], q_groups]
path = f"{target_dir}{file_name}.rst"
add_to_file(".. _" + file_name + ":", path)
add_to_file("\n \n .. role:: raw-html(raw) \n :format: html \n", path)
add_to_file(
f"`{qid}` – {group_name}\n{'='*len(file_name*2 + group_name)}", path
)
# Insert arrows to next & pervious
insert_arrows(waveid, lanid, qids, idx, path)
# Add routing if present:
if df.loc[df.index[0], q_filter] != "-":
add_to_file(
f"*Routing to the question depends on answer in:* :ref:`{waveid}{lanid}-{str(df.loc[df.index[0], q_filter])}`",
path,
)
else:
pass
if df[q_layout].all() == "open":
for i in df.index:
add_to_file(open_question.format(df.loc[i, q_text]), path)
elif df[q_layout].all() == "multi":
add_to_file(header_question.format(df.loc[df.index[0], q_text]), path)
for i in df.index:
add_to_file(bool_entry.format(df.loc[i, q_sub_text]), path)
elif df[q_layout].all() == "table":
insert_table_question(df, path, q_text, q_sub_text, q_categories)
elif df[q_layout].all() == "grid":
insert_grid_question(df, path, q_text, q_sub_text, q_categories)
elif df[q_layout].all() == "cat":
insert_cat_question(df, path, q_text, q_categories)
else:
raise ValueError(f"Unknown layout type for question {qid}.")
add_to_file(insert_image.format(f"{image_path}{waveid}-{qid}.png"), path)
# Insert arrows to next & pervious
insert_arrows(waveid, lanid, qids, idx, path)
def insert_arrows(waveid, lanid, qids, idx, path):
"""Insert arrows pointing to next and previous question."""
if idx == 0:
next_q = waveid + lanid + "-" + qids[idx + 1]
add_to_file(f"\n\n:ref:`{next_q}` :raw-html:`→` \n", path)
elif idx == (len(qids) - 1):
previous_q = f"{waveid}{lanid}-{qids[idx-1]}"
add_to_file(f"\n\n:raw-html:`←` :ref:`{previous_q}` \n", path)
else:
previous_q = f"{waveid}{lanid}-{qids[idx-1]}"
next_q = f"{waveid}{lanid}-{qids[idx+1]}"
add_to_file(
f"\n\n:raw-html:`←` :ref:`{previous_q}` | :ref:`{next_q}` :raw-html:`→` \n",
path,
)
def insert_table_question(df, path, q_text, q_sub_text, q_categories):
"""Insert question of type table with radio buttons."""
add_to_file(header_question.format(df.loc[df.index[0], q_text]), path)
add_to_file(csv_entry.format(), path)
add_to_file(csv_delim.format(), path)
add_to_file(csv_columns.format(",", df.loc[df.index[0], q_categories]), path)
for i in df.index:
items = df.loc[i, q_categories].count(",")
add_to_file(
csv_row.format(df.loc[i, q_sub_text], (button + "|") * (items) + button),
path,
)
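# For reference, the reStructuredText emitted above for a single table question
# looks roughly like this (contents are illustrative):
#
# .. csv-table::
#    :delim: |
#    :header: , Yes, No
#
#    Sub-question text | ❏|❏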
def insert_grid_question(df, path, q_text, q_sub_text, q_categories):
"""Insert question of type grid with entry fields."""
add_to_file(header_question.format(df.loc[df.index[0], q_text]), path)
add_to_file(csv_entry.format(), path)
if pd.isna(df.loc[df.index[0], q_categories]):
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scheduler.GOBI import GOBIScheduler
plt.style.use(['science'])
plt.rcParams["text.usetex"] = False
class Stats():
def __init__(self, Environment, WorkloadModel, Datacenter, Scheduler):
self.env = Environment
self.env.stats = self
self.workload = WorkloadModel
self.datacenter = Datacenter
self.scheduler = Scheduler
self.simulated_scheduler = GOBIScheduler('energy_latency_'+str(self.datacenter.num_hosts))
self.simulated_scheduler.env = self.env
self.initStats()
def initStats(self):
self.hostinfo = []
self.workloadinfo = []
self.activecontainerinfo = []
self.allcontainerinfo = []
self.metrics = []
self.schedulerinfo = []
def saveHostInfo(self):
hostinfo = dict()
hostinfo['interval'] = self.env.interval
hostinfo['cpu'] = [host.getCPU() for host in self.env.hostlist]
hostinfo['numcontainers'] = [len(self.env.getContainersOfHost(i)) for i,host in enumerate(self.env.hostlist)]
hostinfo['power'] = [host.getPower() for host in self.env.hostlist]
hostinfo['baseips'] = [host.getBaseIPS() for host in self.env.hostlist]
hostinfo['ipsavailable'] = [host.getIPSAvailable() for host in self.env.hostlist]
hostinfo['ipscap'] = [host.ipsCap for host in self.env.hostlist]
hostinfo['apparentips'] = [host.getApparentIPS() for host in self.env.hostlist]
hostinfo['ram'] = [host.getCurrentRAM() for host in self.env.hostlist]
hostinfo['ramavailable'] = [host.getRAMAvailable() for host in self.env.hostlist]
hostinfo['disk'] = [host.getCurrentDisk() for host in self.env.hostlist]
hostinfo['diskavailable'] = [host.getDiskAvailable() for host in self.env.hostlist]
self.hostinfo.append(hostinfo)
def saveWorkloadInfo(self, deployed, migrations):
workloadinfo = dict()
workloadinfo['interval'] = self.env.interval
workloadinfo['totalcontainers'] = len(self.workload.createdContainers)
if self.workloadinfo:
workloadinfo['newcontainers'] = workloadinfo['totalcontainers'] - self.workloadinfo[-1]['totalcontainers']
else:
workloadinfo['newcontainers'] = workloadinfo['totalcontainers']
workloadinfo['deployed'] = len(deployed)
workloadinfo['migrations'] = len(migrations)
workloadinfo['inqueue'] = len(self.workload.getUndeployedContainers())
self.workloadinfo.append(workloadinfo)
def saveContainerInfo(self):
containerinfo = dict()
containerinfo['interval'] = self.env.interval
containerinfo['activecontainers'] = self.env.getNumActiveContainers()
containerinfo['ips'] = [(c.getBaseIPS() if c else 0) for c in self.env.containerlist]
containerinfo['apparentips'] = [(c.getApparentIPS() if c else 0) for c in self.env.containerlist]
containerinfo['ram'] = [(c.getRAM() if c else 0) for c in self.env.containerlist]
containerinfo['disk'] = [(c.getDisk() if c else 0) for c in self.env.containerlist]
containerinfo['creationids'] = [(c.creationID if c else -1) for c in self.env.containerlist]
containerinfo['hostalloc'] = [(c.getHostID() if c else -1) for c in self.env.containerlist]
containerinfo['active'] = [(c.active if c else False) for c in self.env.containerlist]
self.activecontainerinfo.append(containerinfo)
def saveAllContainerInfo(self):
containerinfo = dict()
allCreatedContainers = [self.env.getContainerByCID(cid) for cid in list(np.where(self.workload.deployedContainers)[0])]
containerinfo['interval'] = self.env.interval
if self.datacenter.__class__.__name__ == 'Datacenter':
containerinfo['application'] = [self.env.getContainerByCID(cid).application for cid in list(np.where(self.workload.deployedContainers)[0])]
containerinfo['ips'] = [(c.getBaseIPS() if c.active else 0) for c in allCreatedContainers]
containerinfo['create'] = [(c.createAt) for c in allCreatedContainers]
containerinfo['start'] = [(c.startAt) for c in allCreatedContainers]
containerinfo['destroy'] = [(c.destroyAt) for c in allCreatedContainers]
containerinfo['apparentips'] = [(c.getApparentIPS() if c.active else 0) for c in allCreatedContainers]
containerinfo['ram'] = [(c.getRAM() if c.active else 0) for c in allCreatedContainers]
containerinfo['disk'] = [(c.getDisk() if c.active else 0) for c in allCreatedContainers]
containerinfo['hostalloc'] = [(c.getHostID() if c.active else -1) for c in allCreatedContainers]
containerinfo['active'] = [(c.active) for c in allCreatedContainers]
self.allcontainerinfo.append(containerinfo)
def saveMetrics(self, destroyed, migrations):
metrics = dict()
metrics['interval'] = self.env.interval
metrics['numdestroyed'] = len(destroyed)
metrics['nummigrations'] = len(migrations)
metrics['energy'] = [host.getPower()*self.env.intervaltime for host in self.env.hostlist]
metrics['energytotalinterval'] = np.sum(metrics['energy'])
metrics['energypercontainerinterval'] = np.sum(metrics['energy'])/self.env.getNumActiveContainers()
metrics['responsetime'] = [c.totalExecTime + c.totalMigrationTime for c in destroyed]
metrics['avgresponsetime'] = np.average(metrics['responsetime']) if len(destroyed) > 0 else 0
metrics['migrationtime'] = [c.totalMigrationTime for c in destroyed]
metrics['avgmigrationtime'] = np.average(metrics['migrationtime']) if len(destroyed) > 0 else 0
		metrics['slaviolations'] = len(np.where([c.destroyAt > c.sla for c in destroyed])[0])  # np.where returns a tuple of index arrays
metrics['slaviolationspercentage'] = metrics['slaviolations'] * 100.0 / len(destroyed) if len(destroyed) > 0 else 0
metrics['waittime'] = [c.startAt - c.createAt for c in destroyed]
metrics['energytotalinterval_pred'], metrics['avgresponsetime_pred'] = self.runSimulationGOBI()
self.metrics.append(metrics)
def saveSchedulerInfo(self, selectedcontainers, decision, schedulingtime):
schedulerinfo = dict()
schedulerinfo['interval'] = self.env.interval
schedulerinfo['selection'] = selectedcontainers
schedulerinfo['decision'] = decision
schedulerinfo['schedule'] = [(c.id, c.getHostID()) if c else (None, None) for c in self.env.containerlist]
schedulerinfo['schedulingtime'] = schedulingtime
if self.datacenter.__class__.__name__ == 'Datacenter':
schedulerinfo['migrationTime'] = self.env.intervalAllocTimings[-1]
self.schedulerinfo.append(schedulerinfo)
def saveStats(self, deployed, migrations, destroyed, selectedcontainers, decision, schedulingtime):
self.saveHostInfo()
self.saveWorkloadInfo(deployed, migrations)
self.saveContainerInfo()
self.saveAllContainerInfo()
self.saveMetrics(destroyed, migrations)
self.saveSchedulerInfo(selectedcontainers, decision, schedulingtime)
def runSimpleSimulation(self, decision):
host_alloc = []; container_alloc = [-1] * len(self.env.hostlist)
for i in range(len(self.env.hostlist)):
host_alloc.append([])
for c in self.env.containerlist:
if c and c.getHostID() != -1:
host_alloc[c.getHostID()].append(c.id)
container_alloc[c.id] = c.getHostID()
decision = self.simulated_scheduler.filter_placement(decision)
for cid, hid in decision:
if self.env.getPlacementPossible(cid, hid) and container_alloc[cid] != -1:
host_alloc[container_alloc[cid]].remove(cid)
host_alloc[hid].append(cid)
energytotalinterval_pred = 0
for hid, cids in enumerate(host_alloc):
ips = 0
for cid in cids: ips += self.env.containerlist[cid].getApparentIPS()
energytotalinterval_pred += self.env.hostlist[hid].getPowerFromIPS(ips)
return energytotalinterval_pred*self.env.intervaltime, max(0, np.mean([metric_d['avgresponsetime'] for metric_d in self.metrics[-5:]]))
def runSimulationGOBI(self):
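		# Rough outline of this helper (used for the *_pred metrics above): rebuild the
		# current host -> container allocation, ask the simulated scheduler for a fresh
		# selection and placement, apply the feasible moves, and estimate the energy of
		# the resulting allocation from each host's apparent IPS; the response-time
		# "prediction" is simply the mean avgresponsetime of the last few intervals.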
host_alloc = []; container_alloc = [-1] * len(self.env.hostlist)
for i in range(len(self.env.hostlist)):
host_alloc.append([])
for c in self.env.containerlist:
if c and c.getHostID() != -1:
host_alloc[c.getHostID()].append(c.id)
container_alloc[c.id] = c.getHostID()
selected = self.simulated_scheduler.selection()
decision = self.simulated_scheduler.filter_placement(self.simulated_scheduler.placement(selected))
for cid, hid in decision:
if self.env.getPlacementPossible(cid, hid) and container_alloc[cid] != -1:
host_alloc[container_alloc[cid]].remove(cid)
host_alloc[hid].append(cid)
energytotalinterval_pred = 0
for hid, cids in enumerate(host_alloc):
ips = 0
for cid in cids: ips += self.env.containerlist[cid].getApparentIPS()
energytotalinterval_pred += self.env.hostlist[hid].getPowerFromIPS(ips)
return energytotalinterval_pred*self.env.intervaltime, max(0, np.mean([metric_d['avgresponsetime'] for metric_d in self.metrics[-5:]]))
########################################################################################################
def generateGraphsWithInterval(self, dirname, listinfo, obj, metric, metric2=None):
fig, axes = plt.subplots(len(listinfo[0][metric]), 1, sharex=True, figsize=(4, 0.5*len(listinfo[0][metric])))
title = obj + '_' + metric + '_with_interval'
totalIntervals = len(listinfo)
x = list(range(totalIntervals))
metric_with_interval = []; metric2_with_interval = []
ylimit = 0; ylimit2 = 0
for hostID in range(len(listinfo[0][metric])):
metric_with_interval.append([listinfo[interval][metric][hostID] for interval in range(totalIntervals)])
ylimit = max(ylimit, max(metric_with_interval[-1]))
if metric2:
metric2_with_interval.append([listinfo[interval][metric2][hostID] for interval in range(totalIntervals)])
ylimit2 = max(ylimit2, max(metric2_with_interval[-1]))
for hostID in range(len(listinfo[0][metric])):
axes[hostID].set_ylim(0, max(ylimit, ylimit2))
axes[hostID].plot(x, metric_with_interval[hostID])
if metric2:
axes[hostID].plot(x, metric2_with_interval[hostID])
axes[hostID].set_ylabel(obj[0].capitalize()+" "+str(hostID))
axes[hostID].grid(b=True, which='both', color='#eeeeee', linestyle='-')
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + title + '.pdf')
def generateMetricsWithInterval(self, dirname):
fig, axes = plt.subplots(9, 1, sharex=True, figsize=(4, 5))
x = list(range(len(self.metrics)))
res = {}
for i,metric in enumerate(['numdestroyed', 'nummigrations', 'energytotalinterval', 'avgresponsetime',\
'avgmigrationtime', 'slaviolations', 'slaviolationspercentage', 'waittime', 'energypercontainerinterval']):
metric_with_interval = [self.metrics[i][metric] for i in range(len(self.metrics))] if metric != 'waittime' else \
[sum(self.metrics[i][metric]) for i in range(len(self.metrics))]
axes[i].plot(x, metric_with_interval)
axes[i].set_ylabel(metric, fontsize=5)
axes[i].grid(b=True, which='both', color='#eeeeee', linestyle='-')
res[metric] = sum(metric_with_interval)
print("Summation ", metric, " = ", res[metric])
print('Average energy (sum energy interval / sum numdestroyed) = ', res['energytotalinterval']/res['numdestroyed'])
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + 'Metrics' + '.pdf')
def generateWorkloadWithInterval(self, dirname):
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 5))
x = list(range(len(self.workloadinfo)))
for i, metric in enumerate(['totalcontainers', 'newcontainers', 'deployed', 'migrations', 'inqueue']):
metric_with_interval = [self.workloadinfo[i][metric] for i in range(len(self.workloadinfo))]
axes[i].plot(x, metric_with_interval)
axes[i].set_ylabel(metric)
axes[i].grid(b=True, which='both', color='#eeeeee', linestyle='-')
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + 'Workload' + '.pdf')
########################################################################################################
def generateCompleteDataset(self, dirname, data, name):
title = name + '_with_interval'
metric_with_interval = []
headers = list(data[0].keys())
for datum in data:
metric_with_interval.append([datum[value] for value in datum.keys()])
df = pd.DataFrame(metric_with_interval, columns=headers)
df.to_csv(dirname + '/' + title + '.csv', index=False)
def generateDatasetWithInterval(self, dirname, metric, objfunc, metric2=None, objfunc2=None):
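		# Each emitted CSV row corresponds to one interval and contains: the per-host
		# values of `metric`, optionally the per-container values of `metric2`, the
		# current host allocation of every container, and the objective value(s) taken
		# from the *next* interval, which act as the regression target.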
title = metric + '_' + (metric2 + '_' if metric2 else "") + (objfunc + '_' if objfunc else "") + (objfunc2 + '_' if objfunc2 else "") + 'with_interval'
totalIntervals = len(self.hostinfo)
metric_with_interval = []; metric2_with_interval = [] # metric1 is of host and metric2 is of containers
host_alloc_with_interval = []; objfunc2_with_interval = []
objfunc_with_interval = []
for interval in range(totalIntervals-1):
metric_with_interval.append([self.hostinfo[interval][metric][hostID] for hostID in range(len(self.hostinfo[0][metric]))])
host_alloc_with_interval.append([self.activecontainerinfo[interval]['hostalloc'][cID] for cID in range(len(self.activecontainerinfo[0]['hostalloc']))])
objfunc_with_interval.append(self.metrics[interval+1][objfunc])
if metric2:
metric2_with_interval.append(self.activecontainerinfo[interval][metric2])
if objfunc2:
objfunc2_with_interval.append(self.metrics[interval+1][objfunc2])
df = pd.DataFrame(metric_with_interval)
if metric2: df = pd.concat([df, pd.DataFrame(metric2_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(host_alloc_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(objfunc_with_interval)], axis=1)
if objfunc2: df = pd.concat([df, pd.DataFrame(objfunc2_with_interval)], axis=1)
df.to_csv(dirname + '/' + title + '.csv' , header=False, index=False)
def generateDatasetWithInterval2(self, dirname, metric, metric2, metric3, metric4, objfunc, objfunc2):
title = metric + '_' + metric2 + '_' + metric3 + '_' + metric4 + '_' +objfunc + '_' + objfunc2 + '_' + 'with_interval'
totalIntervals = len(self.hostinfo)
metric_with_interval = []; metric2_with_interval = []
metric3_with_interval = []; metric4_with_interval = []
host_alloc_with_interval = []; objfunc2_with_interval = []
objfunc_with_interval = []
for interval in range(totalIntervals-1):
metric_with_interval.append([self.hostinfo[interval][metric][hostID] for hostID in range(len(self.hostinfo[0][metric]))])
host_alloc_with_interval.append([self.activecontainerinfo[interval]['hostalloc'][cID] for cID in range(len(self.activecontainerinfo[0]['hostalloc']))])
objfunc_with_interval.append(self.metrics[interval+1][objfunc])
metric2_with_interval.append(self.activecontainerinfo[interval][metric2])
metric3_with_interval.append(self.metrics[interval][metric3])
metric4_with_interval.append(self.metrics[interval][metric4])
objfunc2_with_interval.append(self.metrics[interval+1][objfunc2])
df = pd.DataFrame(metric_with_interval)
		df = pd.concat([df, pd.DataFrame(metric2_with_interval)], axis=1)
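		# Assumed continuation, mirroring generateDatasetWithInterval above: append the
		# remaining metric, allocation and objective columns, then write the table out.
		df = pd.concat([df, pd.DataFrame(metric3_with_interval)], axis=1)
		df = pd.concat([df, pd.DataFrame(metric4_with_interval)], axis=1)
		df = pd.concat([df, pd.DataFrame(host_alloc_with_interval)], axis=1)
		df = pd.concat([df, pd.DataFrame(objfunc_with_interval)], axis=1)
		df = pd.concat([df, pd.DataFrame(objfunc2_with_interval)], axis=1)
		df.to_csv(dirname + '/' + title + '.csv', header=False, index=False)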
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
        tm.assert_frame_equal(res, exp)
from Grid_Generator import gen_grid
from A_Star import a_star
import pandas as pd
df = pd.DataFrame(columns=['p','Solvable']) #made use of pandas library to store data
p=0.01
while p < 1: #recording values between 0.01 <= p < 1
for i in range(100): #recording 100 values for each density value
grid=gen_grid(101,p)
result, start, stop, parent = a_star(101, p, grid, 1)
        df1 = pd.DataFrame([[p, result]], columns=['p', 'Solvable'])
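        # Assumed continuation (step size is hypothetical): accumulate the run and
        # move on to the next density value.
        df = df.append(df1)
    p += 0.01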
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from joblib import dump, load
import pandas as pd
# for tree models
def lable_encoding(
X_train: pd.DataFrame,
X_test: pd.DataFrame,
attrs: [str] = None
):
    print('Label encoding')
attributes = attrs if attrs is not None else X_train.columns.values
new_attributes = [attr + '_le' for attr in attributes]
train = X_train[attributes]
test = X_test[attributes]
print('Input train shape:', train.shape)
print('Input test shape:', test.shape)
print('Attributes:', attributes)
train_encods = []
test_encods = []
for attr, new_attr in zip(attributes, new_attributes):
encoder = LabelEncoder()
encoder.fit(train[attr])
train_encoded = encoder.transform(train[attr])
test_encoded = encoder.transform(test[attr])
train_encoded_df = pd.DataFrame(train_encoded, columns=[new_attr])
test_encoded_df = pd.DataFrame(test_encoded, columns=[new_attr])
train_encods.append(train_encoded_df)
test_encods.append(test_encoded_df)
    # Return both the encoded training and test frames so the test encodings
    # computed above are not discarded.
    return pd.concat(train_encods, axis=1), pd.concat(test_encods, axis=1)
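# Minimal usage sketch (hypothetical data): LabelEncoder is fitted on the training
# column only, so this assumes every category in X_test also appears in X_train.
if __name__ == '__main__':
    demo_train = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': ['S', 'M', 'L']})
    demo_test = pd.DataFrame({'color': ['blue', 'red'], 'size': ['M', 'S']})
    train_le, test_le = lable_encoding(demo_train, demo_test, attrs=['color', 'size'])
    print(train_le)
    print(test_le)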
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
import pickle
from siuba import *
from datetime import datetime as dt
# Opening SHAP results with pickle
infile = open("lgbm_dict", "rb")
lgbm_dict = pickle.load(infile)
asdas=pickle.load(infile)
df_r2 = pd.DataFrame(columns=["game_id", "year", "r2_test", "r2_train"])
df_RMSE = pd.DataFrame(columns=["game_id", "year", "test", "train"])
for name, values in lgbm_dict.items():
r2 = pd.DataFrame({"game_id": [name], "year": [values[4]], "r2_test": [values[1]], "r2_train":[values[3]]})
df_r2 = df_r2.append(r2)
    RMSE = pd.DataFrame({"game_id": [name], "year": [values[4]], "test": [values[0]], "train": [values[2]]})
    df_RMSE = df_RMSE.append(RMSE)
from collections import OrderedDict
import george
from george import kernels
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
from astropy.cosmology import FlatLambdaCDM
from scipy.optimize import minimize
from sklearn.model_selection import StratifiedKFold
from scipy.signal import find_peaks
from scipy.special import erf
import tqdm
from settings import settings
# Parameters of the dataset
num_passbands = 6
pad = 100
start_mjd = 59580 - pad
end_mjd = 60675 + pad
# Define class labels, galactic vs extragalactic label and weights
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99]
class_weights = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1,
67: 1, 88: 1, 90: 1, 92: 1, 95: 1, 99: 2}
class_galactic = {6: True, 15: False, 16: True, 42: False, 52: False, 53: True,
62: False, 64: False, 65: True, 67: False, 88: False, 90:
False, 92: True, 95: False}
# Reverse engineered cosmology used in sims
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
def find_time_to_fractions(fluxes, fractions, forward=True):
"""Find the time for a lightcurve to decline to a specific fraction of
maximum light.
fractions should be a decreasing list of the fractions of maximum light
that will be found (eg: [0.8, 0.5, 0.2]).
"""
max_time = np.argmax(fluxes)
max_flux = fluxes[max_time]
result = np.ones(len(fractions)) * 99999
frac_idx = 0
# Start at maximum light, and move along the spectrum. Whenever we cross
# one threshold, we add it to the list and keep going. If we hit the end of
# the array without crossing the threshold, we return a large number for
# that time.
offset = 0
while True:
offset += 1
if forward:
new_time = max_time + offset
            if new_time >= len(fluxes):
break
else:
new_time = max_time - offset
if new_time < 0:
break
test_flux = fluxes[new_time]
while test_flux < max_flux * fractions[frac_idx]:
result[frac_idx] = offset
frac_idx += 1
if frac_idx == len(fractions):
break
if frac_idx == len(fractions):
break
return result
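# Worked example (hypothetical values): with fluxes = [10, 100, 70, 40, 10] and
# fractions = [0.8, 0.5, 0.2], the maximum (100) sits at index 1, so the function
# returns [1, 2, 3]: the flux first drops below 80 one step after maximum, below 50
# two steps after, and below 20 three steps after; thresholds never reached keep 99999.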
def multi_weighted_logloss(y_true, y_preds):
"""
@author olivier https://www.kaggle.com/ogrellier
multi logloss for PLAsTiCC challenge
"""
if y_preds.shape[1] != len(classes):
# No prediction for 99, pretend that it doesn't exist.
use_classes = classes[:-1]
else:
use_classes = classes
y_p = y_preds
# Trasform y_true in dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weights[i] for i in use_classes])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return loss
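# In equation form, the loss computed above is
#   L = - ( sum_c w_c * (1/N_c) * sum_i y_{i,c} * ln p_{i,c} ) / sum_c w_c,
# where N_c is the number of true members of class c and w_c its challenge weight.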
def lgb_multi_weighted_logloss(y_true, y_preds):
"""Wrapper around multi_weighted_logloss that works with lgbm"""
y_p = y_preds.reshape(y_true.shape[0], len(classes) - 1, order='F')
loss = multi_weighted_logloss(y_true, y_p)
return 'wloss', loss, False
def do_predictions_flatprob(object_ids, features, classifiers):
pred = 0
for classifier in classifiers:
pred += (
classifier.predict_proba(
features, num_iteration=classifier.best_iteration_)
) / len(classifiers)
# Add in flat prediction for class 99. This prediction depends on whether
# the object is galactic or extragalactic.
gal_frac_99 = 0.04
# Weights without 99 included.
weight_gal = sum([class_weights[class_id] for class_id, is_gal in
class_galactic.items() if is_gal])
weight_extgal = sum([class_weights[class_id] for class_id, is_gal in
class_galactic.items() if not is_gal])
guess_99_gal = gal_frac_99 * class_weights[99] / weight_gal
guess_99_extgal = (1 - gal_frac_99) * class_weights[99] / weight_extgal
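    # With the class weights defined at module level, weight_gal = 5 and
    # weight_extgal = 11, so guess_99_gal = 0.04 * 2 / 5 = 0.016 and
    # guess_99_extgal = 0.96 * 2 / 11 ≈ 0.175.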
is_gals = features['hostgal_photoz'] == 0.
pred_99 = np.array([guess_99_gal if is_gal else guess_99_extgal for is_gal
in is_gals])
stack_pred = np.hstack([pred, pred_99[:, None]])
# Normalize
stack_pred = stack_pred / np.sum(stack_pred, axis=1)[:, None]
# Build a pandas dataframe with the result
df = pd.DataFrame(index=object_ids, data=stack_pred,
columns=['class_%d' % i for i in classes])
return df
def do_predictions(object_ids, features, classifiers, gal_outlier_score=0.25,
extgal_outlier_score=1.4):
print("OLD!!! DON'T USE!")
is_gal = features['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(features), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
pred = 0
for classifier in classifiers:
# Get base scores
raw_scores = classifier.predict_proba(
features, raw_score=True, num_iteration=classifier.best_iteration_
)
max_scores = np.max(raw_scores, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None,
max_scores)
# Add in class 99 scores.
scores = np.hstack([raw_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(scores) / np.sum(np.exp(scores), axis=1)[:, None]
pred += iter_pred / len(classifiers)
# Build a pandas dataframe with the result
df = pd.DataFrame(index=object_ids, data=pred,
columns=['class_%d' % i for i in classes])
return df
def do_scores(object_ids, features, classifiers):
scores = []
for classifier in classifiers:
scores.append(classifier.predict_proba(
features, raw_score=True,
num_iteration=classifier.best_iteration_))
scores = np.array(scores)
return scores
def convert_scores(meta, scores, gal_outlier_score=0.4,
extgal_outlier_score=1.4):
is_gal = meta['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(meta), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
pred = 0
for iter_scores in scores:
# Iterate over each classifier's scores if there were more than one.
# Get base scores
# max_scores = np.max(iter_scores, axis=1)[:, None]
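        # 100 * 12.5 / 13 ≈ 96.2, i.e. the percentile just below the largest of the
        # 13 per-class scores: a slightly softer cap than the strict maximum
        # (commented out above).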
max_scores = np.percentile(iter_scores, 100 * 12.5/13, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None, max_scores)
# Add in class 99 scores.
iter_full_scores = np.hstack([iter_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(iter_full_scores) / np.sum(np.exp(iter_full_scores),
axis=1)[:, None]
pred += iter_pred / len(scores)
print("Mean gal 99: %.5f" % np.mean(pred[is_gal, -1]))
print("Mean ext 99: %.5f" % np.mean(pred[~is_gal, -1]))
# Build a pandas dataframe with the result
df = pd.DataFrame(index=meta['object_id'], data=pred,
columns=['class_%d' % i for i in classes])
return df
def convert_scores_2(meta, scores, s2n, gal_outlier_score=-2.,
extgal_outlier_score=-0.8):
is_gal = meta['hostgal_photoz'] == 0.
base_class_99_scores = np.zeros((len(meta), 1))
base_class_99_scores[is_gal] = gal_outlier_score
base_class_99_scores[~is_gal] = extgal_outlier_score
base_class_99_scores[:, 0] += 1.5*np.log10(s2n)
pred = 0
for iter_scores in scores:
# Iterate over each classifier's scores if there were more than one.
# Get base scores
# max_scores = np.max(iter_scores, axis=1)[:, None]
max_scores = np.percentile(iter_scores, 100 * 12.5/13, axis=1)[:, None]
class_99_scores = np.clip(base_class_99_scores, None, max_scores)
# Add in class 99 scores.
iter_full_scores = np.hstack([iter_scores, class_99_scores])
# Turn the scores into a prediction
iter_pred = np.exp(iter_full_scores) / np.sum(np.exp(iter_full_scores),
axis=1)[:, None]
pred += iter_pred / len(scores)
print("Mean gal 99: %.5f" % np.mean(pred[is_gal, -1]))
print("Mean ext 99: %.5f" % np.mean(pred[~is_gal, -1]))
# Build a pandas dataframe with the result
df = pd.DataFrame(index=meta['object_id'], data=pred,
columns=['class_%d' % i for i in classes])
return df
def fit_classifier(train_x, train_y, train_weights, eval_x=None, eval_y=None,
eval_weights=None, **kwargs):
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.05,
# 'bagging_fraction': .75,
# 'bagging_freq': 5,
'colsample_bytree': .5,
'reg_alpha': 0.,
'reg_lambda': 0.,
'min_split_gain': 10.,
'min_child_weight': 2000.,
'n_estimators': 5000,
'silent': -1,
'verbose': -1,
'max_depth': 7,
'num_leaves': 50,
}
lgb_params.update(kwargs)
fit_params = {
'verbose': 100,
'sample_weight': train_weights,
}
if eval_x is not None:
fit_params['eval_set'] = [(eval_x, eval_y)]
fit_params['eval_metric'] = lgb_multi_weighted_logloss
fit_params['early_stopping_rounds'] = 50
fit_params['eval_sample_weight'] = [eval_weights]
classifier = lgb.LGBMClassifier(**lgb_params)
classifier.fit(train_x, train_y, **fit_params)
return classifier
class Dataset(object):
def __init__(self):
"""Class to represent part of the PLAsTiCC dataset.
This class can load either the training or validation data, can produce
features and then can create outputs. The features can also be loaded
from a file to avoid having to recalculate them every time. Not
everything has to be loaded at once, but some functions might not work
if that is the case. I haven't put in the effort to make everything
safe with regards to random calls, so if something breaks you probably
just need to load the data that it needs.
"""
self.flux_data = None
self.meta_data = None
self.features = None
self.dataset_name = None
# Update this whenever the feature calculation code is updated.
self._features_version = settings['FEATURES_VERSION']
# Update this whenever the augmentation code is updated.
self._augment_version = settings['AUGMENT_VERSION']
def load_training_data(self):
"""Load the training dataset."""
self.flux_data = pd.read_csv(settings['RAW_TRAINING_PATH'])
self.meta_data = pd.read_csv(settings["RAW_TRAINING_METADATA_PATH"])
# Label folds
y = self.meta_data['target']
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
kfold_indices = -1*np.ones(len(y))
for idx, (fold_train, fold_val) in enumerate(folds.split(y, y)):
kfold_indices[fold_val] = idx
self.meta_data['fold'] = kfold_indices
self.dataset_name = 'train'
def load_test_data(self):
"""Load the test metadata."""
self.meta_data = pd.read_csv(settings["RAW_TEST_METADATA_PATH"])
self.dataset_name = 'test'
def load_chunk(self, chunk_idx, load_flux_data=True):
"""Load a chunk from the test dataset.
I previously split up the dataset into smaller files that can be read
into memory.
By default, the flux data is loaded which takes a long time. That can
be turned off if desired.
"""
path = settings["SPLIT_TEST_PATH_FORMAT"] % chunk_idx
if load_flux_data:
self.flux_data = pd.read_hdf(path, 'df')
self.meta_data = pd.read_hdf(path, 'meta')
self.dataset_name = 'test_%04d' % chunk_idx
def load_augment(self, num_augments, base_name='train'):
"""Load an augmented dataset."""
dataset_name = '%s_augment_v%d_%d' % (
base_name, self._augment_version, num_augments
)
path = '%s/%s.h5' % (settings['AUGMENT_DIR'], dataset_name)
self.flux_data = pd.read_hdf(path, 'df')
self.meta_data = pd.read_hdf(path, 'meta')
self.dataset_name = dataset_name
@property
def features_path(self):
"""Path to the features file for this dataset"""
features_path = settings['FEATURES_PATH_FORMAT'] % (
self._features_version, self.dataset_name)
return features_path
def load_simple_features(self):
"""Load the features for a dataset and postprocess them.
This assumes that the features have already been created.
"""
self.raw_features = pd.read_hdf(self.features_path)
rf = self.raw_features
# Keys that we want to use in the prediction.
use_keys = [
'hostgal_photoz',
'hostgal_photoz_err',
'count',
]
features = rf[use_keys].copy()
features['length_scale'] = rf['gp_fit_1']
features['max_flux'] = rf['max_flux_3']
features['max_flux_ratio_r'] = (
(rf['max_flux_5'] - rf['max_flux_3']) /
(np.abs(rf['max_flux_5']) + np.abs(rf['max_flux_3']))
)
features['max_flux_ratio_b'] = (
(rf['max_flux_3'] - rf['max_flux_0']) /
(np.abs(rf['max_flux_3']) + np.abs(rf['max_flux_0']))
)
features['min_flux'] = rf['min_flux_3']
features['min_flux_ratio_r'] = (
(rf['min_flux_5'] - rf['min_flux_3']) /
(np.abs(rf['min_flux_5']) + np.abs(rf['min_flux_3']))
)
features['min_flux_ratio_b'] = (
(rf['min_flux_3'] - rf['min_flux_0']) /
(np.abs(rf['min_flux_3']) + np.abs(rf['min_flux_0']))
)
features['max_dt'] = rf['max_dt_5'] - rf['max_dt_0']
features['positive_width'] = rf['positive_width_3']
features['negative_width'] = rf['negative_width_3']
features['frac_time_fwd_0.8'] = rf['frac_time_fwd_0.8_3']
features['frac_time_fwd_0.5'] = rf['frac_time_fwd_0.5_3']
features['frac_time_fwd_0.2'] = rf['frac_time_fwd_0.2_3']
features['ratio_r_time_fwd_0.8'] = (
rf['frac_time_fwd_0.8_3'] / rf['frac_time_fwd_0.8_5'])
features['ratio_b_time_fwd_0.8'] = (
rf['frac_time_fwd_0.8_3'] / rf['frac_time_fwd_0.8_0'])
features['ratio_r_time_fwd_0.5'] = (
rf['frac_time_fwd_0.5_3'] / rf['frac_time_fwd_0.5_5'])
features['ratio_b_time_fwd_0.5'] = (
rf['frac_time_fwd_0.5_3'] / rf['frac_time_fwd_0.5_0'])
features['ratio_r_time_fwd_0.2'] = (
rf['frac_time_fwd_0.2_3'] / rf['frac_time_fwd_0.2_5'])
        features['ratio_b_time_fwd_0.2'] = (
            rf['frac_time_fwd_0.2_3'] / rf['frac_time_fwd_0.2_0'])
features['frac_time_bwd_0.8'] = rf['frac_time_bwd_0.8_3']
features['frac_time_bwd_0.5'] = rf['frac_time_bwd_0.5_3']
features['frac_time_bwd_0.2'] = rf['frac_time_bwd_0.2_3']
features['ratio_r_time_bwd_0.8'] = (
rf['frac_time_bwd_0.8_3'] / rf['frac_time_bwd_0.8_5'])
features['ratio_b_time_bwd_0.8'] = (
rf['frac_time_bwd_0.8_3'] / rf['frac_time_bwd_0.8_0'])
features['ratio_r_time_bwd_0.5'] = (
rf['frac_time_bwd_0.5_3'] / rf['frac_time_bwd_0.5_5'])
features['ratio_b_time_bwd_0.5'] = (
rf['frac_time_bwd_0.5_3'] / rf['frac_time_bwd_0.5_0'])
features['ratio_r_time_bwd_0.2'] = (
rf['frac_time_bwd_0.2_3'] / rf['frac_time_bwd_0.2_5'])
        features['ratio_b_time_bwd_0.2'] = (
            rf['frac_time_bwd_0.2_3'] / rf['frac_time_bwd_0.2_0'])
features['frac_s2n_5'] = rf['count_s2n_5'] / rf['count']
features['frac_s2n_-5'] = rf['count_s2n_-5'] / rf['count']
features['frac_background'] = rf['frac_background']
features['time_width_s2n_5'] = rf['time_width_s2n_5']
features['count_max_center'] = rf['count_max_center']
features['count_max_rise_20'] = rf['count_max_rise_20']
features['count_max_rise_50'] = rf['count_max_rise_50']
features['count_max_rise_100'] = rf['count_max_rise_100']
features['count_max_fall_20'] = rf['count_max_fall_20']
features['count_max_fall_50'] = rf['count_max_fall_50']
features['count_max_fall_100'] = rf['count_max_fall_100']
features['num_peaks'] = np.nanmedian([
rf['peaks_pos_0_count'],
rf['peaks_pos_1_count'],
rf['peaks_pos_2_count'],
rf['peaks_pos_3_count'],
rf['peaks_pos_4_count'],
rf['peaks_pos_5_count']
], axis=0)
features['peak_frac_2'] = np.nanmedian([
rf['peaks_pos_0_frac_2'],
rf['peaks_pos_1_frac_2'],
rf['peaks_pos_2_frac_2'],
rf['peaks_pos_3_frac_2'],
rf['peaks_pos_4_frac_2'],
rf['peaks_pos_5_frac_2']
], axis=0)
features['peak_frac_3'] = np.nanmedian([
rf['peaks_pos_0_frac_3'],
rf['peaks_pos_1_frac_3'],
rf['peaks_pos_2_frac_3'],
rf['peaks_pos_3_frac_3'],
rf['peaks_pos_4_frac_3'],
rf['peaks_pos_5_frac_3']
], axis=0)
features['total_s2n'] = (
rf['total_s2n_0'] +
rf['total_s2n_1'] +
rf['total_s2n_2'] +
rf['total_s2n_3'] +
rf['total_s2n_4'] +
rf['total_s2n_5']
)
self.features = features
def load_features(self):
"""Load the features for a dataset and postprocess them.
This assumes that the features have already been created.
"""
self.raw_features = pd.read_hdf(self.features_path)
# Drop keys that we don't want to use in the prediction
drop_keys = [
'object_id',
'hostgal_specz',
# 'hostgal_photoz',
# 'distmod',
'ra',
'decl',
'gal_l',
'gal_b',
'mwebv',
'ddf',
'max_time',
# 'hostgal_photoz',
]
features = self.raw_features
for key in drop_keys:
try:
features = features.drop(key, 1)
except KeyError:
# Key doesn't exist in this version. Ignore it.
pass
self.features = features
def _get_gp_data(self, object_meta, object_data, subtract_median=True):
times = []
fluxes = []
bands = []
flux_errs = []
# The zeropoints were arbitrarily set from the first image. Pick the
# 20th percentile of all observations in each channel as a new
# zeropoint. This has good performance when there are supernova-like
# bursts in the image, even if they are quite wide.
# UPDATE: when picking the 20th percentile, observations with just
# noise get really messed up. Revert back to the median for now and see
# if that helps. It doesn't really matter if supernovae go slightly
# negative...
for passband in range(num_passbands):
band_data = object_data[object_data['passband'] == passband]
if len(band_data) == 0:
# No observations in this band
continue
# ref_flux = np.percentile(band_data['flux'], 20)
ref_flux = np.median(band_data['flux'])
for idx, row in band_data.iterrows():
times.append(row['mjd'] - start_mjd)
flux = row['flux']
if subtract_median:
flux = flux - ref_flux
fluxes.append(flux)
bands.append(passband)
flux_errs.append(row['flux_err'])
times = np.array(times)
bands = np.array(bands)
fluxes = np.array(fluxes)
flux_errs = np.array(flux_errs)
# Guess the scale based off of the highest signal-to-noise point.
# Sometimes the edge bands are pure noise and can have large
# insignificant points.
scale = fluxes[np.argmax(fluxes / flux_errs)]
gp_data = {
'meta': object_meta,
'times': times,
'bands': bands,
'scale': scale,
'fluxes': fluxes,
'flux_errs': flux_errs,
}
return gp_data
def get_gp_data(self, idx, target=None, verbose=False,
subtract_median=True):
if target is not None:
target_data = self.meta_data[self.meta_data['target'] == target]
object_meta = target_data.iloc[idx]
else:
object_meta = self.meta_data.iloc[idx]
if verbose:
print(object_meta)
object_id = object_meta['object_id']
object_data = self.flux_data[self.flux_data['object_id'] == object_id]
return self._get_gp_data(object_meta, object_data)
def fit_gp(self, idx=None, target=None, object_meta=None, object_data=None,
verbose=False, guess_length_scale=20., fix_scale=False):
if idx is not None:
# idx was specified, pull from the internal data
gp_data = self.get_gp_data(idx, target, verbose)
else:
# The meta data and flux data can also be directly specified.
gp_data = self._get_gp_data(object_meta, object_data)
# GP kernel. We use a 2-dimensional Matern kernel to model the
# transient. The kernel amplitude is fixed to a fraction of the maximum
# value in the data, and the kernel width in the wavelength direction
# is also fixed. We fit for the kernel width in the time direction as
# different transients evolve on very different time scales.
kernel = ((0.2*gp_data['scale'])**2 *
kernels.Matern32Kernel([guess_length_scale**2, 5**2],
ndim=2))
# print(kernel.get_parameter_names())
if fix_scale:
kernel.freeze_parameter('k1:log_constant')
kernel.freeze_parameter('k2:metric:log_M_1_1')
gp = george.GP(kernel)
if verbose:
print(kernel.get_parameter_dict())
x_data = np.vstack([gp_data['times'], gp_data['bands']]).T
gp.compute(x_data, gp_data['flux_errs'])
fluxes = gp_data['fluxes']
def neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.log_likelihood(fluxes)
def grad_neg_ln_like(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(fluxes)
# print(np.exp(gp.get_parameter_vector()))
bounds = [(0, np.log(1000**2))]
if not fix_scale:
bounds = [(-30, 30)] + bounds
fit_result = minimize(
neg_ln_like,
gp.get_parameter_vector(),
jac=grad_neg_ln_like,
# bounds=[(-30, 30), (0, 10), (0, 5)],
# bounds=[(0, 10), (0, 5)],
bounds=bounds,
# bounds=[(-30, 30), (0, np.log(1000**2))],
# options={'ftol': 1e-4}
)
if not fit_result.success:
print("Fit failed for %d!" % idx)
# print(-gp.log_likelihood(fluxes))
# print(np.exp(fit_result.x))
gp.set_parameter_vector(fit_result.x)
if verbose:
print(fit_result)
print(kernel.get_parameter_dict())
pred = []
pred_times = np.arange(end_mjd - start_mjd + 1)
for band in range(6):
pred_bands = np.ones(len(pred_times)) * band
pred_x_data = np.vstack([pred_times, pred_bands]).T
# pred, pred_var = gp.predict(fluxes, pred_x_data, return_var=True)
# band_pred, pred_var = gp.predict(fluxes, pred_x_data,
# return_var=True)
# band_pred = gp.predict(fluxes, pred_x_data, return_var=False)
band_pred = gp.predict(fluxes, pred_x_data, return_cov=False)
pred.append(band_pred)
pred = np.array(pred)
# Add results of the GP fit to the gp_data dictionary.
gp_data['pred_times'] = pred_times
gp_data['pred'] = pred
gp_data['fit_parameters'] = fit_result.x
return gp_data
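    # Illustrative sketch (hypothetical variable names): fitting a single object
    # and inspecting the GP model might look like
    #
    #     gp_data = dataset.fit_gp(idx=0, verbose=True)
    #     gp_data['pred'].shape        # (6 passbands, number of predicted days)
    #     gp_data['fit_parameters']    # optimized kernel parameter vector
    #
    # where `dataset` is an instance of this class with meta/flux data loaded.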
def plot_gp(self, *args, **kwargs):
result = self.fit_gp(*args, **kwargs)
plt.figure()
for band in range(num_passbands):
cut = result['bands'] == band
color = 'C%d' % band
plt.errorbar(result['times'][cut], result['fluxes'][cut],
result['flux_errs'][cut], fmt='o', c=color)
plt.plot(result['pred_times'], result['pred'][band], c=color,
label=band)
plt.legend()
def plot_gp_interactive(self):
"""Make an interactive plot of the GP output.
This requires the ipywidgets package to be set up, and has only been
tested in jupyter-lab.
"""
from ipywidgets import interact, IntSlider, Dropdown, fixed
targets = np.unique(self.meta_data['target'])
idx_widget = IntSlider(min=0, max=1)
target_widget = Dropdown(options=targets, index=0)
def update_idx_range(*args):
idx_widget.max = np.sum(self.meta_data['target'] ==
target_widget.value) - 1
target_widget.observe(update_idx_range, 'value')
update_idx_range()
interact(self.plot_gp, idx=idx_widget, target=target_widget,
object_meta=fixed(None), object_data=fixed(None))
def extract_features(self, *args, **kwargs):
"""Extract features from a target"""
features = OrderedDict()
# Fit the GP and produce an output model
gp_data = self.fit_gp(*args, **kwargs)
times = gp_data['times']
fluxes = gp_data['fluxes']
flux_errs = gp_data['flux_errs']
bands = gp_data['bands']
s2ns = fluxes / flux_errs
pred = gp_data['pred']
meta = gp_data['meta']
# Add the object id. This shouldn't be used for training a model, but
# is necessary to identify which row is which when we split things up.
features['object_id'] = meta['object_id']
# Features from the meta data
features['hostgal_specz'] = meta['hostgal_specz']
features['hostgal_photoz'] = meta['hostgal_photoz']
features['hostgal_photoz_err'] = meta['hostgal_photoz_err']
features['ra'] = meta['ra']
features['decl'] = meta['decl']
features['gal_l'] = meta['gal_l']
features['gal_b'] = meta['gal_b']
features['distmod'] = meta['distmod']
features['mwebv'] = meta['mwebv']
features['ddf'] = meta['ddf']
# Count how many observations there are
features['count'] = len(fluxes)
# Features from GP fit parameters
for i, fit_parameter in enumerate(gp_data['fit_parameters']):
features['gp_fit_%d' % i] = fit_parameter
# Maximum fluxes and times.
max_times = np.argmax(pred, axis=1)
med_max_time = np.median(max_times)
max_dts = max_times - med_max_time
max_fluxes = np.array([pred[band, time] for band, time in
enumerate(max_times)])
features['max_time'] = med_max_time
for band, (max_flux, max_dt) in enumerate(zip(max_fluxes, max_dts)):
features['max_flux_%d' % band] = max_flux
features['max_dt_%d' % band] = max_dt
# Minimum fluxes.
min_fluxes = np.min(pred, axis=1)
for band, min_flux in enumerate(min_fluxes):
features['min_flux_%d' % band] = min_flux
# Calculate the positive and negative integrals of the lightcurve,
# normalized to the respective peak fluxes. This gives a measure of the
# "width" of the lightcurve, even for non-bursty objects.
positive_widths = np.sum(np.clip(pred, 0, None), axis=1) / max_fluxes
negative_widths = np.sum(np.clip(pred, None, 0), axis=1) / min_fluxes
for band in range(num_passbands):
features['positive_width_%d' % band] = positive_widths[band]
features['negative_width_%d' % band] = negative_widths[band]
# Find times to fractions of the peak amplitude
fractions = [0.8, 0.5, 0.2]
for band in range(num_passbands):
forward_times = find_time_to_fractions(pred[band], fractions)
backward_times = find_time_to_fractions(pred[band], fractions,
forward=False)
for fraction, forward_time, backward_time in \
zip(fractions, forward_times, backward_times):
features['frac_time_fwd_%.1f_%d' % (fraction, band)] = \
forward_time
features['frac_time_bwd_%.1f_%d' % (fraction, band)] = \
backward_time
# Count the number of data points with significant positive/negative
# fluxes
thresholds = [-20, -10, -5, -3, 3, 5, 10, 20]
for threshold in thresholds:
if threshold < 0:
count = np.sum(s2ns < threshold)
else:
count = np.sum(s2ns > threshold)
features['count_s2n_%d' % threshold] = count
# Count the fraction of data points that are "background", i.e. less
# than a 3 sigma detection of something.
features['frac_background'] = np.sum(np.abs(s2ns) < 3) / len(s2ns)
# Sum up the total signal-to-noise in each band
for band in range(6):
mask = bands == band
band_fluxes = fluxes[mask]
band_flux_errs = flux_errs[mask]
total_band_s2n = np.sqrt(np.sum((band_fluxes / band_flux_errs)**2))
features['total_s2n_%d' % band] = total_band_s2n
# Count the time delay between the first and last significant fluxes
thresholds = [5, 10, 20]
for threshold in thresholds:
significant_times = times[np.abs(s2ns) > threshold]
if len(significant_times) < 2:
dt = -1
else:
dt = np.max(significant_times) - np.min(significant_times)
features['time_width_s2n_%d' % threshold] = dt
# Count how many data points are within a certain number of days of
# maximum light. This provides some estimate of the robustness of the
# determination of maximum light and rise/fall times.
time_bins = [
(-5, 5, 'center'),
(-20, -5, 'rise_20'),
(-50, -20, 'rise_50'),
(-100, -50, 'rise_100'),
(-200, -100, 'rise_200'),
(-300, -200, 'rise_300'),
(-400, -300, 'rise_400'),
(-500, -400, 'rise_500'),
(-600, -500, 'rise_600'),
(-700, -600, 'rise_700'),
(-800, -700, 'rise_800'),
(5, 20, 'fall_20'),
(20, 50, 'fall_50'),
(50, 100, 'fall_100'),
(100, 200, 'fall_200'),
(200, 300, 'fall_300'),
(300, 400, 'fall_400'),
(400, 500, 'fall_500'),
(500, 600, 'fall_600'),
(600, 700, 'fall_700'),
(700, 800, 'fall_800'),
]
for start, end, label in time_bins:
diff_times = times - med_max_time
mask = (diff_times > start) & (diff_times < end)
# Count how many observations there are in the time bin
count = np.sum(mask)
features['count_max_%s' % label] = count
            # Measure the GP flux level relative to the peak flux. We do this
            # by scaling the predicted flux in each band by its peak flux and
            # taking the mean and standard deviation of that ratio within the
            # time bin.
bin_start = np.clip(int(med_max_time + start), 0, None)
bin_end = np.clip(int(med_max_time + end), 0, None)
if bin_start == bin_end:
scale_pred = np.nan
bin_mean_fluxes = np.nan
bin_std_fluxes = np.nan
else:
scale_pred = pred[:, bin_start:bin_end] / max_fluxes[:, None]
bin_mean_fluxes = np.mean(scale_pred)
bin_std_fluxes = np.std(scale_pred)
features['mean_%s' % label] = bin_mean_fluxes
features['std_%s' % label] = bin_std_fluxes
# Do peak detection on the GP output
for positive in (True, False):
for band in range(num_passbands):
if positive:
band_flux = pred[band]
base_name = 'peaks_pos_%d' % band
else:
band_flux = -pred[band]
base_name = 'peaks_neg_%d' % band
peaks, properties = find_peaks(
band_flux,
height=np.max(np.abs(band_flux) / 5.)
)
num_peaks = len(peaks)
features['%s_count' % base_name] = num_peaks
sort_heights = np.sort(properties['peak_heights'])[::-1]
# Measure the fractional height of the other peaks.
for i in range(1, 3):
if num_peaks > i:
rel_height = sort_heights[i] / sort_heights[0]
else:
rel_height = np.nan
features['%s_frac_%d' % (base_name, (i+1))] = rel_height
return list(features.keys()), np.array(list(features.values()))
def extract_all_features(self):
"""Extract all features and save them to an HDF file.
"""
all_features = []
for i in tqdm.tqdm(range(len(self.meta_data))):
feature_labels, features = self.extract_features(i)
all_features.append(features)
feature_table = | pd.DataFrame(all_features, columns=feature_labels) | pandas.DataFrame |
import time
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from scipy.sparse import csr_matrix
from statsmodels.stats.multitest import fdrcorrection as fdr
from joblib import Parallel, delayed, parallel_backend
from typing import List, Tuple, Dict, Union, Optional
import logging
logger = logging.getLogger(__name__)
from anndata import AnnData
from pegasusio import timer, MultimodalData, UnimodalData
from pegasus.tools import eff_n_jobs
def _calc_qvals(
nclust: int,
pvals: np.ndarray,
first_j: int,
second_j: int,
) -> np.ndarray:
""" Calculate FDR
"""
qvals = np.zeros(pvals.shape, dtype = np.float32)
if second_j > 0:
_, qval = fdr(pvals[:, first_j])
qvals[:, first_j] = qvals[:, second_j] = qval
else:
for j in range(nclust):
_, qvals[:, j] = fdr(pvals[:, j])
return qvals
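# Illustrative sketch of the underlying call (not part of the pegasus API): the
# imported `fdr` is statsmodels' Benjamini-Hochberg correction and is applied to
# one cluster column at a time, e.g.
#
#     rejected, qval = fdr(np.array([0.001, 0.02, 0.04, 0.8], dtype=np.float32))
#
# When exactly two clusters are compared, the same q-values are shared between
# the two columns, as done above.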
def _de_test(
X: csr_matrix,
cluster_labels: pd.Categorical,
gene_names: List[str],
n_jobs: int,
t: Optional[bool] = False,
fisher: Optional[bool] = False,
temp_folder: Optional[str] = None,
verbose: Optional[bool] = True,
) -> pd.DataFrame:
""" Collect sufficient statistics, run Mann-Whitney U test, calculate auroc (triggering diff_expr_utils.calc_mwu in parallel), optionally run Welch's T test and Fisher's Exact test (in parallel).
"""
from pegasus.cylib.de_utils import csr_to_csc, calc_mwu, calc_stat
start = time.perf_counter()
ords = np.argsort(cluster_labels.codes)
data, indices, indptr = csr_to_csc(X.data, X.indices, X.indptr, X.shape[0], X.shape[1], ords)
cluster_cnts = cluster_labels.value_counts()
n1arr = cluster_cnts.values
n2arr = X.shape[0] - n1arr
cluster_cumsum = cluster_cnts.cumsum().values
nclust = n1arr.size
first_j = second_j = -1
posvec = np.where(n1arr > 0)[0]
if len(posvec) == 2:
first_j = posvec[0]
second_j = posvec[1]
if verbose:
end = time.perf_counter()
logger.info(f"CSR matrix is converted to CSC matrix. Time spent = {end - start:.4f}s.")
start = end
# logger.info(f"Preparation (including converting X to csc_matrix format) for MWU test is finished. Time spent = {time.perf_counter() - start:.2f}s.")
ngene = X.shape[1]
quotient = ngene // n_jobs
residue = ngene % n_jobs
intervals = []
start_pos = end_pos = 0
for i in range(n_jobs):
end_pos = start_pos + quotient + (i < residue)
if end_pos == start_pos:
break
intervals.append((start_pos, end_pos))
start_pos = end_pos
with parallel_backend("loky", inner_max_num_threads=1):
result_list = Parallel(n_jobs=len(intervals), temp_folder=temp_folder)(
delayed(calc_mwu)(
start_pos,
end_pos,
data,
indices,
indptr,
n1arr,
n2arr,
cluster_cumsum,
first_j,
second_j,
verbose,
)
for start_pos, end_pos in intervals
)
Ulist = []
plist = []
alist = []
for U_stats, pvals, aurocs in result_list:
Ulist.append(U_stats)
plist.append(pvals)
alist.append(aurocs)
U_stats = np.concatenate(Ulist, axis = 0)
pvals = np.concatenate(plist, axis = 0)
aurocs = np.concatenate(alist, axis = 0)
qvals = _calc_qvals(nclust, pvals, first_j, second_j)
dfU = pd.DataFrame(U_stats, index = gene_names, columns = [f"{x}:mwu_U" for x in cluster_labels.categories])
dfUp = pd.DataFrame(pvals, index = gene_names, columns = [f"{x}:mwu_pval" for x in cluster_labels.categories])
dfUq = pd.DataFrame(qvals, index = gene_names, columns = [f"{x}:mwu_qval" for x in cluster_labels.categories])
dfUa = pd.DataFrame(aurocs, index = gene_names, columns = [f"{x}:auroc" for x in cluster_labels.categories])
if verbose:
end = time.perf_counter()
logger.info(f"MWU test and AUROC calculation are finished. Time spent = {end - start:.4f}s.")
start = end
# basic statistics and optional t test and fisher test
results = calc_stat(data, indices, indptr, n1arr, n2arr, cluster_cumsum, first_j, second_j, t, fisher, verbose)
dfl2M = pd.DataFrame(results[0][0], index = gene_names, columns = [f"{x}:log2Mean" for x in cluster_labels.categories])
dfl2Mo = pd.DataFrame(results[0][1], index = gene_names, columns = [f"{x}:log2Mean_other" for x in cluster_labels.categories])
dfl2FC = pd.DataFrame(results[0][2], index = gene_names, columns = [f"{x}:log2FC" for x in cluster_labels.categories])
dfpct = pd.DataFrame(results[0][3], index = gene_names, columns = [f"{x}:percentage" for x in cluster_labels.categories])
dfpcto = pd.DataFrame(results[0][4], index = gene_names, columns = [f"{x}:percentage_other" for x in cluster_labels.categories])
dfpfc = pd.DataFrame(results[0][5], index = gene_names, columns = [f"{x}:percentage_fold_change" for x in cluster_labels.categories])
df_list = [dfl2M, dfl2Mo, dfl2FC, dfpct, dfpcto, dfpfc, dfUa, dfU, dfUp, dfUq]
if verbose:
end = time.perf_counter()
logger.info(f"Sufficient statistics are collected. Time spent = {end - start:.4f}s.")
start = end
if t:
qvals = _calc_qvals(nclust, results[1][1], first_j, second_j)
dft = pd.DataFrame(results[1][0], index = gene_names, columns = [f"{x}:t_tstat" for x in cluster_labels.categories])
dftp = pd.DataFrame(results[1][1], index = gene_names, columns = [f"{x}:t_pval" for x in cluster_labels.categories])
dftq = pd.DataFrame(qvals, index = gene_names, columns = [f"{x}:t_qval" for x in cluster_labels.categories])
df_list.extend([dft, dftp, dftq])
if verbose:
end = time.perf_counter()
logger.info(f"Welch's t-test is finished. Time spent = {end - start:.4f}s.")
start = end
if fisher:
from pegasus.cylib.cfisher import fisher_exact
a_true, a_false, b_true, b_false = results[1 if not t else 2]
oddsratios = np.zeros((ngene, n1arr.size), dtype = np.float32)
pvals = np.ones((ngene, n1arr.size), dtype = np.float32)
if second_j > 0:
oddsratio, pval = fisher_exact(a_true[first_j], a_false[first_j], b_true[first_j], b_false[first_j])
oddsratios[:, first_j] = oddsratio
idx1 = oddsratio > 0.0
idx2 = oddsratio < 1e30
oddsratios[idx1 & idx2, second_j] = 1.0 / oddsratio[idx1 & idx2]
oddsratios[~idx1] = 1e30
pvals[:, first_j] = pvals[:, second_j] = pval
else:
with parallel_backend("loky", inner_max_num_threads=1):
result_list = Parallel(n_jobs=n_jobs, temp_folder=temp_folder)(
delayed(fisher_exact)(
a_true[i],
a_false[i],
b_true[i],
b_false[i],
)
for i in posvec
)
for i in range(posvec.size):
oddsratios[:, posvec[i]] = result_list[i][0]
pvals[:, posvec[i]] = result_list[i][1]
qvals = _calc_qvals(nclust, pvals, first_j, second_j)
dff = pd.DataFrame(oddsratios, index = gene_names, columns = [f"{x}:fisher_oddsratio" for x in cluster_labels.categories])
dffp = pd.DataFrame(pvals, index = gene_names, columns = [f"{x}:fisher_pval" for x in cluster_labels.categories])
dffq = pd.DataFrame(qvals, index = gene_names, columns = [f"{x}:fisher_qval" for x in cluster_labels.categories])
df_list.extend([dff, dffp, dffq])
if verbose:
end = time.perf_counter()
logger.info(f"Fisher's exact test is finished. Time spent = {end - start:.4f}s.")
df = pd.concat(df_list, axis = 1)
return df
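# Sketch of how the result frame might be consumed (hypothetical cluster name
# "1"; column names follow the "<cluster>:<statistic>" pattern built above):
#
#     df = _de_test(X, cluster_labels, gene_names, n_jobs=4)
#     df["1:log2FC"].nlargest(20)        # strongest up-regulation in cluster 1
#     df[df["1:mwu_qval"] < 0.05]        # genes significant for cluster 1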
def _perform_de_cond(
clust_ids: List[str],
cond_labels: pd.Categorical,
gene_names: List[str],
cond_n1arr_list: List[List[int]],
cond_n2arr_list: List[List[int]],
cond_cumsum_list: List[List[int]],
data_list: List[List[float]],
indices_list: List[List[int]],
indptr_list: List[List[int]],
t: bool,
fisher: bool,
verbose: bool,
) -> List[pd.DataFrame]:
""" Run DE test for clusters specified. In each cluster, perform one vs. rest for the condition
"""
df_res_list = []
from pegasus.cylib.de_utils import calc_mwu, calc_stat
ngene = indptr_list[0].size - 1
for i, clust_id in enumerate(clust_ids):
nclust = cond_n1arr_list[i].size
first_j = second_j = -1
posvec = np.where(cond_n1arr_list[i] > 0)[0]
if len(posvec) == 2:
first_j = posvec[0]
second_j = posvec[1]
U_stats, pvals, aurocs = calc_mwu(0, ngene, data_list[i], indices_list[i], indptr_list[i], cond_n1arr_list[i], cond_n2arr_list[i], cond_cumsum_list[i], first_j, second_j, False)
qvals = _calc_qvals(nclust, pvals, first_j, second_j)
dfU = pd.DataFrame(U_stats, index = gene_names, columns = [f"{clust_id}:{x}:mwu_U" for x in cond_labels.categories])
dfUp = pd.DataFrame(pvals, index = gene_names, columns = [f"{clust_id}:{x}:mwu_pval" for x in cond_labels.categories])
dfUq = pd.DataFrame(qvals, index = gene_names, columns = [f"{clust_id}:{x}:mwu_qval" for x in cond_labels.categories])
dfUa = pd.DataFrame(aurocs, index = gene_names, columns = [f"{clust_id}:{x}:auroc" for x in cond_labels.categories])
results = calc_stat(data_list[i], indices_list[i], indptr_list[i], cond_n1arr_list[i], cond_n2arr_list[i], cond_cumsum_list[i], first_j, second_j, t, fisher, False)
dfl2M = pd.DataFrame(results[0][0], index = gene_names, columns = [f"{clust_id}:{x}:log2Mean" for x in cond_labels.categories])
dfl2Mo = pd.DataFrame(results[0][1], index = gene_names, columns = [f"{clust_id}:{x}:log2Mean_other" for x in cond_labels.categories])
dfl2FC = pd.DataFrame(results[0][2], index = gene_names, columns = [f"{clust_id}:{x}:log2FC" for x in cond_labels.categories])
dfpct = pd.DataFrame(results[0][3], index = gene_names, columns = [f"{clust_id}:{x}:percentage" for x in cond_labels.categories])
dfpcto = pd.DataFrame(results[0][4], index = gene_names, columns = [f"{clust_id}:{x}:percentage_other" for x in cond_labels.categories])
dfpfc = pd.DataFrame(results[0][5], index = gene_names, columns = [f"{clust_id}:{x}:percentage_fold_change" for x in cond_labels.categories])
df_list = [dfl2M, dfl2Mo, dfl2FC, dfpct, dfpcto, dfpfc, dfUa, dfU, dfUp, dfUq]
if t:
qvals = _calc_qvals(nclust, results[1][1], first_j, second_j)
dft = pd.DataFrame(results[1][0], index = gene_names, columns = [f"{clust_id}:{x}:t_tstat" for x in cond_labels.categories])
dftp = pd.DataFrame(results[1][1], index = gene_names, columns = [f"{clust_id}:{x}:t_pval" for x in cond_labels.categories])
dftq = pd.DataFrame(qvals, index = gene_names, columns = [f"{clust_id}:{x}:t_qval" for x in cond_labels.categories])
df_list.extend([dft, dftp, dftq])
if fisher:
a_true, a_false, b_true, b_false = results[1 if not t else 2]
from pegasus.cylib.cfisher import fisher_exact
oddsratios = np.zeros((ngene, nclust), dtype = np.float32)
pvals = np.ones((ngene, nclust), dtype = np.float32)
if second_j > 0:
oddsratio, pval = fisher_exact(a_true[first_j], a_false[first_j], b_true[first_j], b_false[first_j])
oddsratios[:, first_j] = oddsratio
idx1 = oddsratio > 0.0
idx2 = oddsratio < 1e30
oddsratios[idx1 & idx2, second_j] = 1.0 / oddsratio[idx1 & idx2]
oddsratios[~idx1] = 1e30
pvals[:, first_j] = pvals[:, second_j] = pval
else:
for j in posvec:
oddsratios[:, j], pvals[:, j] = fisher_exact(a_true[j], a_false[j], b_true[j], b_false[j])
qvals = _calc_qvals(nclust, pvals, first_j, second_j)
dff = pd.DataFrame(oddsratios, index = gene_names, columns = [f"{clust_id}:{x}:fisher_oddsratio" for x in cond_labels.categories])
dffp = pd.DataFrame(pvals, index = gene_names, columns = [f"{clust_id}:{x}:fisher_pval" for x in cond_labels.categories])
dffq = pd.DataFrame(qvals, index = gene_names, columns = [f"{clust_id}:{x}:fisher_qval" for x in cond_labels.categories])
df_list.extend([dff, dffp, dffq])
df_res_list.append(pd.concat(df_list, axis = 1))
if verbose:
clust_ids_str = ','.join([str(x) for x in clust_ids])
print(f"_perform_de_cond finished for clusters {clust_ids_str}.")
return df_res_list
def _de_test_cond(
X: csr_matrix,
cluster_labels: pd.Categorical,
cond_labels: pd.Categorical,
gene_names: List[str],
n_jobs: int,
t: Optional[bool] = False,
fisher: Optional[bool] = False,
temp_folder: Optional[str] = None,
verbose: Optional[bool] = True,
) -> List[pd.DataFrame]:
""" Collect sufficient statistics, run Mann-Whitney U test, calculate auroc, optionally run Welch's T test and Fisher's Exact test on each cluster (DE analysis is based on cond_labels)
"""
start = time.perf_counter()
clust_cond = np.array(list(zip(cluster_labels.codes, cond_labels.codes)), dtype = [("clust", "<i4"), ("cond", "<i4")])
ords = np.argsort(clust_cond)
df_cross = pd.crosstab(cluster_labels.codes, cond_labels.codes)
cluster_cnts = df_cross.values.sum(axis = 1)
cluster_cumsum = cluster_cnts.cumsum()
from pegasus.cylib.de_utils import csr_to_csc_cond
data_list, indices_list, indptrs = csr_to_csc_cond(X.data, X.indices, X.indptr, X.shape[1], ords, cluster_cumsum)
if verbose:
end = time.perf_counter()
logger.info(f"CSR matrix is converted to CSC matrix. Time spent = {end - start:.2f}s.")
    # Distribute clusters across the workers in a zig-zag order (0..neff-1 and
    # back again). Since cluster IDs are sorted by descending cell count, this
    # balances the total number of cells handled by each job.
ords = np.argsort(cluster_cnts)[::-1]
nclust = cluster_cumsum.size
neff = min(n_jobs, nclust)
intervals = []
datalists = []
indiceslists = []
for i in range(neff):
intervals.append([])
datalists.append([])
indiceslists.append([])
pos = 0
sign = 1
for i in range(nclust):
intervals[pos].append(ords[i])
datalists[pos].append(data_list[ords[i]])
indiceslists[pos].append(indices_list[ords[i]])
if pos + sign == neff:
sign = -1
elif pos + sign == -1:
sign = 1
else:
pos += sign
n1arr = df_cross.values
n2arr = cluster_cnts.reshape(-1, 1) - n1arr
cumsum = df_cross.cumsum(axis = 1).values
with parallel_backend("loky", inner_max_num_threads=1):
result_list = Parallel(n_jobs=neff, temp_folder=temp_folder)(
delayed(_perform_de_cond)(
cluster_labels.categories[intervals[i]],
cond_labels,
gene_names,
n1arr[intervals[i]],
n2arr[intervals[i]],
cumsum[intervals[i]],
datalists[i],
indiceslists[i],
indptrs[intervals[i]],
t,
fisher,
verbose,
)
for i in range(neff)
)
df = pd.concat([x for y in result_list for x in y], axis = 1)
return df
@timer(logger=logger)
def de_analysis(
data: Union[MultimodalData, UnimodalData, AnnData],
cluster: str,
condition: Optional[str] = None,
subset: Optional[List[str]] = None,
de_key: Optional[str] = "de_res",
n_jobs: Optional[int] = -1,
t: Optional[bool] = False,
fisher: Optional[bool] = False,
temp_folder: Optional[str] = None,
verbose: Optional[bool] = True,
) -> None:
"""Perform Differential Expression (DE) Analysis on data.
The analysis considers one cluster at one time, comparing gene expression levels on cells
within the cluster with all the others using a number of statistical tools, and determining
up-regulated genes and down-regulated genes of the cluster.
    Mann-Whitney U test and AUROC are calculated by default. Welch's t-test and Fisher's exact test are optional.
The scalability performance on calculating all the test statistics is improved by the inspiration from `Presto <https://github.com/immunogenomics/presto>`_.
Parameters
----------
data: ``MultimodalData``, ``UnimodalData``, or ``anndata.AnnData``
Data matrix with rows for cells and columns for genes.
cluster: ``str``
Cluster labels used in DE analysis. Must exist in ``data.obs``.
condition: ``str``, optional, default: ``None``
Sample attribute used as condition in DE analysis. If ``None``, no condition is considered; otherwise, must exist in ``data.obs``.
If ``condition`` is used, the DE analysis will be performed on cells of each level of ``data.obs[condition]`` respectively, and collect the results after finishing.
subset: ``List[str]``, optional, default: ``None``
        Perform DE analysis on only a subset of cluster IDs. The subset is specified as a list of strings, such as ``["clust_1", "clust_3", "clust_5"]``, where all IDs must exist in ``data.obs[cluster]``.
de_key: ``str``, optional, default: ``"de_res"``
Key name of DE analysis results stored.
n_jobs: ``int``, optional, default: ``-1``
Number of threads to use. If ``-1``, use all available threads.
    t: ``bool``, optional, default: ``False``
If ``True``, calculate Welch's t test.
fisher: ``bool``, optional, default: ``False``
If ``True``, calculate Fisher's exact test.
temp_folder: ``str``, optional, default: ``None``
Joblib temporary folder for memmapping numpy arrays.
verbose: ``bool``, optional, default: ``True``
If ``True``, show detailed intermediate output.
Returns
-------
``None``
Update ``data.varm``:
``data.varm[de_key]``: DE analysis result.
Examples
--------
>>> pg.de_analysis(data, cluster='spectral_leiden_labels')
>>> pg.de_analysis(data, cluster='louvain_labels', condition='anno')
"""
if cluster not in data.obs:
raise ValueError("Cannot find cluster label!")
cluster_labels = data.obs[cluster].values
if not | is_categorical_dtype(cluster_labels) | pandas.api.types.is_categorical_dtype |
import os
import sys
import torch
import pickle
import argparse
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn as skl
import tensorflow as tf
from scipy.stats import gamma
from callbacks import RegressionCallback
from regression_data import generate_toy_data
from regression_models import GammaNormalRegression
def write_pickle(data, filename):
# Verify folder structure
is_filename_in_folder = len(filename.split('/')) > 1
if is_filename_in_folder:
assert os.path.exists(os.path.dirname(
filename)), f'The path {os.path.dirname(filename)} does not correspond to any existing path'
with open(filename, 'wb') as outfile:
pickle.dump(data, outfile)
return
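# Illustrative usage (hypothetical paths): the parent folder must already exist,
# otherwise the assertion above fails.
#
#     write_pickle({"loss": 0.12, "epoch": 3}, "results/metrics.pkl")
#     write_pickle([1, 2, 3], "metrics.pkl")   # no folder component, no check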
class MeanVarianceLogger(object):
def __init__(self):
self.cols_data = ['Algorithm', 'Prior', 'x', 'y']
self.df_data = | pd.DataFrame(columns=self.cols_data) | pandas.DataFrame |
__author__ = 'thor'
import pandas as pd
import ut.util.ulist as util_ulist
import re
import ut.pcoll.order_conserving as colloc
def incremental_merge(left, right, **kwargs):
"""
as pandas.merge, but can handle the case when left dataframe is empty or None
"""
    if left is None or left.shape == (0, 0):
        return right
    return pd.merge(left, right, **kwargs)
''' Starting from Commonwealth_Connect_Service_Requests.csv, i.e. the tickets
feature. See notebook #2 for more info.
'''
import pandas as pd
import numpy as np
from geopy.distance import geodesic
def find_nearest_building(df,latI,lonI):
minDist = 4000
flag = True
for i in range(0,df.shape[0]):
lat = df['lat'].iloc[i]
lon = df['lon'].iloc[i]
dist = geodesic([lat,lon],[latI,lonI]).meters
if dist<minDist:
minDist = dist
nearestBuildingInDf = i
if minDist==4000:
flag=False
nearestBuildingInDf = pd.DataFrame()
return nearestBuildingInDf,flag
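# Illustrative sketch of the distance primitive used above (hypothetical
# coordinates in Cambridge, MA):
#
#     geodesic([42.3736, -71.1097], [42.3770, -71.1167]).meters   # roughly 0.7 km
#
# find_nearest_building scans df row by row with this metric and returns the
# positional index of the closest row, with flag=False if nothing is found
# within the 4000 m starting threshold.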
def fixLonLat(df,colName):
# extracting the lat/lon info to answer the question of where they are located:
extract = df[colName]
extract = extract.apply(lambda x: x.split('(',1)[1])
extract = extract.apply(lambda x: x.split(')',1)[0])
df['lat'] = extract.apply(lambda x: x.split(',',1)[0])
df['lon'] = extract.apply(lambda x: x.split(',',1)[1])
print('Range of Latitudes for input coordinates:',df['lat'].min(),df['lat'].max())
print('Range of Longitudes for input coordinates:',df['lon'].min(),df['lon'].max())
df['lat'] = df['lat'].astype('float')
df['lon'] = df['lon'].astype('float')
return df
def minMaxCoords(lat,lon,dlat,dlon):
minLat = lat-dlat
maxLat = lat+dlat
minLon = lon-dlon
maxLon = lon+dlon
return minLat,maxLat,minLon,maxLon
def findIdentifier(tickets,identifierLocation,dlat,dlon):
# running over the tickets/complaints, cutting part of the identifierLocation DataaFrame close to each
# ticket location, and finding the closest match building wise:
tickets_feature = pd.DataFrame()
tmp = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch import nn
from torch import optim
from torch.nn.modules import loss
from torch.utils.data import Subset, DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from dataset import HogweedClassificationDataset
from plant_clef_resnet import load_plant_clef_resnet18
def train(model: nn.Module, train_loader: DataLoader, optimizer: optim.Optimizer,
loss_function: nn.Module, current_epoch_number: int = 0,
device: torch.device = None, batch_reports_interval: int = 10):
""" Training a provided model using provided data etc. """
model.train()
loss_accum = 0
for batch_idx, (data, target) in enumerate(train_loader):
# throwing away the gradients
optimizer.zero_grad()
# predicting scores
output = model(data.to(device))
# computing the error
loss = loss_function(output, target.unsqueeze(dim=-1).float().to(device))
# saving loss for stats
loss_accum += loss.item() / len(data)
# computing gradients
loss.backward()
# updating the model's weights
optimizer.step()
if batch_idx % batch_reports_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tAveraged Epoch Loss: {:.6f}'.format(
current_epoch_number,
batch_idx * len(data),
len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss_accum / (batch_idx + 1)))
def sigmoid2predictions(output: torch.Tensor) -> torch.Tensor:
""" model.predict(X) based on sigmoid scores """
return (torch.sign(output - 0.5) + 1) / 2
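# For example (illustrative): scores above 0.5 map to class 1 and scores below
# 0.5 map to class 0, so sigmoid2predictions(torch.tensor([0.2, 0.7])) gives
# tensor([0., 1.]). A score of exactly 0.5 would yield 0.5 rather than a hard
# class, which is acceptable here since sigmoid outputs rarely hit 0.5 exactly.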
def test(model, test_loader, loss_function, device):
""" Testing an already trained model using the provided data from `test_loader` """
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for data, target in test_loader:
target = target.float().unsqueeze(dim=-1).to(device)
output = model(data.to(device))
pred = sigmoid2predictions(output)
test_loss += loss_function(output, target).sum().item()
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('...validation: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def set_parameter_requires_grad(model: nn.Module, requires_grad: bool):
for param in model.parameters():
param.requires_grad = requires_grad
if __name__ == "__main__":
from argparse import ArgumentParser
from datetime import datetime
parser = ArgumentParser()
parser.add_argument("--seed", default=160)
parser.add_argument("--val_fraction", default=0.4)
parser.add_argument("--batch_size", default=4)
parser.add_argument("--l1size", default=128)
parser.add_argument("--dropout", default=0.8)
parser.add_argument("--epochs", default=5)
parser.add_argument("--unfreeze", default=True)
parser.add_argument("--epochs_unfreeze", default=50)
args = parser.parse_args()
train_set = HogweedClassificationDataset(root="prepared_data/images_train_resized",
transform=transforms.Compose([transforms.ToTensor()]))
print("Splitting data...")
train_indices, val_indices, _, _ = train_test_split(
range(len(train_set)),
train_set.targets,
stratify=train_set.targets,
test_size=args.val_fraction,
shuffle=True,
random_state=args.seed
)
train_loader = torch.utils.data.DataLoader(Subset(train_set, train_indices), batch_size=args.batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(Subset(train_set, val_indices), shuffle=False, batch_size=128)
print("CUDA available?", torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Setting up a model...")
pretrained_resnet = load_plant_clef_resnet18(device=device)
set_parameter_requires_grad(pretrained_resnet, requires_grad=False)
pretrained_resnet.fc = nn.Sequential(
nn.Linear(in_features=512, out_features=args.l1size),
nn.ReLU(),
nn.Dropout(p=args.dropout),
nn.Linear(in_features=args.l1size, out_features=1),
nn.Sigmoid()
)
pretrained_resnet = pretrained_resnet.to(device)
optimizer = optim.AdamW(pretrained_resnet.parameters(), amsgrad=True)
loss_function = loss.BCELoss()
print("Starting training...")
for epoch in range(args.epochs):
train(pretrained_resnet, train_loader, optimizer, loss_function, epoch, device)
test(pretrained_resnet, val_loader, loss_function, device)
print("Goodness of fit (evaluation on train):")
test(pretrained_resnet, train_loader, loss_function, device)
if args.unfreeze and args.epochs_unfreeze - 1 == epoch:
print("Unfreezing...")
set_parameter_requires_grad(pretrained_resnet, requires_grad=True)
# GENERATING SUBMISSION
test_set = ImageFolder(root="prepared_data/images_test_resized",
transform=transforms.Compose([transforms.ToTensor()]))
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
ids = [name.replace(".jpg", "").split("/")[-1].split("\\")[-1] for name, _ in test_set.samples]
pretrained_resnet.eval()
results = {"id": [], "has_hogweed": []}
with torch.no_grad():
for i, (data, _) in enumerate(test_loader):
# # checking if the order is the same just in case
# assert torch.all(test_set[i][0] == data).detach().item()
output = pretrained_resnet(data.to(device))
pred = sigmoid2predictions(output)
results["id"].append(ids[i])
results["has_hogweed"].append(int(pred.detach().item()))
| pd.DataFrame(results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""This functions are based on my own technical analysis library:
https://github.com/bukosabino/ta
You should check it if you need documentation of this functions.
"""
import pandas as pd
import numpy as np
"""
Volatility Indicators
"""
def bollinger_hband(close, n=20, ndev=2):
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
hband = mavg + ndev*mstd
return pd.Series(hband, name='hband')
def bollinger_lband(close, n=20, ndev=2):
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
lband = mavg - ndev*mstd
return pd.Series(lband, name='lband')
def bollinger_mavg(close, n=20):
mavg = close.rolling(n).mean()
return pd.Series(mavg, name='mavg')
def bollinger_hband_indicator(close, n=20, ndev=2):
df = pd.DataFrame([close]).transpose()
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
hband = mavg + ndev*mstd
df['hband'] = 0.0
df.loc[close > hband, 'hband'] = 1.0
return pd.Series(df['hband'], name='bbihband')
def bollinger_lband_indicator(close, n=20, ndev=2):
df = pd.DataFrame([close]).transpose()
mavg = close.rolling(n).mean()
mstd = close.rolling(n).std()
lband = mavg - ndev*mstd
df['lband'] = 0.0
df.loc[close < lband, 'lband'] = 1.0
return pd.Series(df['lband'], name='bbilband')
def donchian_channel_hband(close, n=20):
hband = close.rolling(n).max()
return | pd.Series(hband, name='dchband') | pandas.Series |
from dataclasses import dataclass
import traceback
import random
import itertools
import constraint
@dataclass
class Player:
name: str
hand: list
def take_turn(self):
print('this is your hand:', self.hand)
while True:
command = input('What do you want to do? [suggest, accuse]: ')
if command == 'suggest':
while True:
suggestion = input('Suggest? (person,weapon,room): ')
try:
person, weapon, room = suggestion.split(',')
                        if person not in Clue.people:
                            print(f'{person} not in {Clue.people}')
                            continue
                        if weapon not in Clue.weapons:
                            print(f'{weapon} not in {Clue.weapons}')
                            continue
                        if room not in Clue.rooms:
                            print(f'{room} not in {Clue.rooms}')
continue
return Suggestion(self.name, person, weapon, room)
except Exception as e:
print('Could not understand you, please try again')
traceback.print_exc()
continue
elif command == 'accuse':
while True:
suggestion = input('Accuse? (person,weapon,room): ')
try:
person, weapon, room = suggestion.split(',')
                        if person not in Clue.people:
                            print(f'{person} not in {Clue.people}')
                            continue
                        if weapon not in Clue.weapons:
                            print(f'{weapon} not in {Clue.weapons}')
                            continue
                        if room not in Clue.rooms:
                            print(f'{room} not in {Clue.rooms}')
continue
return Accusation(self.name, person, weapon, room)
except Exception as e:
print('Could not understand you, please try again')
traceback.print_exc()
continue
else:
print('Please choose one of [suggest, accuse].')
continue
def respond_to_suggestion(self, suggestion):
print(f'{self.name}, do you have any cards that disprove this? ')
suggested_cards_in_hand = [
c for c in self.hand if c in suggestion.cards]
if not any(suggested_cards_in_hand):
return Pass(self.name)
else:
while True:
card_to_show = input(f'Choose a card from {suggested_cards_in_hand}: ')
if not card_to_show in suggested_cards_in_hand:
print(
                        'That wasn\'t a card that matches the suggestion.')
continue
else:
# suggesting_player.get_shown(self.name, card_to_show)
return ShowedCard(self.name, card_to_show)
    def get_shown(self, showed_card):
        print(f'{showed_card.player_name} showed {self.name} {showed_card.card}.')
def add_card(self, card):
self.hand.append(card)
def suggestion_disproved(self, suggestion, disproving_player_name):
pass
class SimpleConstraintAIPlayer(Player):
""" Keep track of the cards in hand and the ones that have been shown. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.problem = constraint.Problem()
self.problem.addVariable('person', Clue.people)
self.problem.addVariable('weapon', Clue.weapons)
self.problem.addVariable('room', Clue.rooms)
def add_card(self, card):
self.problem.addConstraint(constraint.NotInSetConstraint(card))
self.hand.append(card)
def get_shown(self, showed_card):
print(f'{showed_card.player_name} showed {self.name} {showed_card.card}')
self.problem.addConstraint(
constraint.NotInSetConstraint(showed_card.card))
def suggestion_disproved(self, suggestion, disproving_player_name):
pass
def respond_to_suggestion(self, suggestion):
suggested_cards_in_hand = [
c for c in self.hand if c in suggestion.cards]
if not any(suggested_cards_in_hand):
return Pass(self.name)
else:
card_to_show = random.sample(suggested_cards_in_hand, k=1)[0]
return ShowedCard(self.name, card_to_show)
def take_turn(self):
solutions = self.problem.getSolutions()
print(f'{self.name}', len(solutions), solutions if len(solutions) < 10 else None)
if len(solutions) == 1: # certainty is 1, accuse
s = solutions[0]
return Accusation(self.name, s['person'], s['weapon'], s['room'])
else: # still a possibility
s = random.sample(solutions, k=1)[0]
return Suggestion(self.name, s['person'], s['weapon'], s['room'])
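# Illustrative sketch of the python-constraint bookkeeping used above
# (hypothetical known card 'knife'): every card the player has seen is excluded
# from all three variables, and getSolutions() enumerates what remains.
#
#     problem = constraint.Problem()
#     problem.addVariable('person', Clue.people)
#     problem.addVariable('weapon', Clue.weapons)
#     problem.addVariable('room', Clue.rooms)
#     problem.addConstraint(constraint.NotInSetConstraint(['knife']))
#     len(problem.getSolutions())   # 6 * 5 * 9 = 270 instead of 6 * 6 * 9 = 324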
class BetterConstraintAIPlayer(SimpleConstraintAIPlayer):
""" keep track of what cards other players have potentially revealed to each other. """
def suggestion_disproved(self, suggestion, disproving_player_name):
self.problem.addConstraint(
constraint.SomeNotInSetConstraint(suggestion.cards))
class RevealLessInfoAIPlayer(SimpleConstraintAIPlayer):
""" Keep track of which cards which have been revealed. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.revealed_cards = []
def respond_to_suggestion(self, suggestion):
suggested_cards_in_hand = [
c for c in self.hand if c in suggestion.cards]
if not any(suggested_cards_in_hand):
return Pass(self.name)
else:
previously_revealed_cards = [
c for c in suggested_cards_in_hand if c in self.revealed_cards]
if previously_revealed_cards:
card_to_show = random.sample(previously_revealed_cards, k=1)[0]
else:
card_to_show = random.sample(suggested_cards_in_hand, k=1)[0]
self.revealed_cards.append(card_to_show)
return ShowedCard(self.name, card_to_show)
class RevealLessInfoBetterAIPlayer(BetterConstraintAIPlayer):
""" Keep track of which cards which have been revealed. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.revealed_cards = []
def respond_to_suggestion(self, suggestion):
suggested_cards_in_hand = [
c for c in self.hand if c in suggestion.cards]
if not any(suggested_cards_in_hand):
return Pass(self.name)
else:
previously_revealed_cards = [
c for c in suggested_cards_in_hand if c in self.revealed_cards]
if previously_revealed_cards:
card_to_show = random.sample(previously_revealed_cards, k=1)[0]
else:
card_to_show = random.sample(suggested_cards_in_hand, k=1)[0]
self.revealed_cards.append(card_to_show)
return ShowedCard(self.name, card_to_show)
@dataclass
class Pass:
player_name: str
@dataclass
class ShowedCard:
player_name: str
card: str
@dataclass
class Suggestion:
player_name: str
person: str
weapon: str
room: str
@property
def cards(self):
return self.person, self.weapon, self.room
@dataclass
class Accusation(Suggestion):
pass
class Win(Exception):
pass
class Clue:
people = ('scarlet', 'plum', 'green', 'mustard', 'white', 'blue')
weapons = ('pipe', 'knife', 'candlestick', 'rope', 'gun', 'wrench')
rooms = ('dining', 'kitchen', 'billiards', 'ball',
'hall', 'conservatory', 'library', 'study', 'lounge')
full_deck = people + weapons + rooms
def __init__(self, *args, **kwargs):
self.players = []
self.events = []
self.clues = None
self.turncount = 0
self.setup(*args, **kwargs)
def setup(self, n_real_players=1, n_players=4, aiplayer=itertools.cycle([SimpleConstraintAIPlayer]), seed=None):
if seed:
random.seed(seed)
people, weapons, rooms = list(Clue.people), list(
Clue.weapons), list(Clue.rooms)
p = random.sample(people, k=1)[0]
people.remove(p)
w = random.sample(weapons, k=1)[0]
weapons.remove(w)
r = random.sample(rooms, k=1)[0]
rooms.remove(r)
self.clues = (p, w, r)
print('SECRET:', self.clues)
deck = people + weapons + rooms
print(deck)
print(len(Clue.full_deck), len(deck))
random.shuffle(deck)
players = [Player(name=str(i), hand=[]) for i in range(n_real_players)]
players.extend([next(aiplayer)(name=str(i), hand=[])
for i in range(n_real_players, n_players)])
random.shuffle(players)
while deck:
for p in players:
try:
p.add_card(deck.pop())
except IndexError:
break
self.players = players
print(self.players)
return self
def suggest(self, suggestion):
self.events.append(suggestion)
print(suggestion)
# starting with the player to the left of the current player...
curr_player = [p for p in self.players if p.name ==
suggestion.player_name][0]
curr_idx = self.players.index(curr_player)
for player in self.players[curr_idx+1:] + self.players[:curr_idx]:
response = player.respond_to_suggestion(suggestion)
print(response)
self.events.append(response)
if isinstance(response, ShowedCard):
curr_player.get_shown(response)
for p in self.players:
p.suggestion_disproved(suggestion, player.name)
break
def accuse(self, accusation):
self.events.append(accusation)
print(accusation)
if accusation.cards == self.clues:
print(f'Player {accusation.player_name} has won!')
win = Win(accusation.player_name)
win.player = [p for p in self.players if p.name ==
accusation.player_name][0]
raise win
else:
print(f'Player {accusation.player_name} has made an incorrect accusation. They lose.')
# remove them from the turn order
player = [p for p in self.players if p.name ==
accusation.player_name][0]
self.players.remove(player)
# redistribute cards
while player.hand:
for p in self.players:
try:
p.add_card(player.hand.pop())
except IndexError:
break
def run(self):
self.turncount = 0
while self.players:
            for player in list(self.players):
try:
self.turncount += 1
print(f'#{self.turncount}: Player {player.name}\'s turn')
response = player.take_turn()
if isinstance(response, Accusation):
self.accuse(response)
                    elif isinstance(response, Suggestion):
self.suggest(response)
except Win as e:
self.winner = e
return self
def run_experiment(n_runs=100, **kwargs):
import sys
import time
experiment = {}
experiment['kwargs'] = kwargs.copy()
experiment['kwargs']['n_runs'] = n_runs
with open('test.log', 'w') as f:
_stdout = sys.stdout
sys.stdout = f
t1 = time.time()
runs = [Clue(**kwargs).run()
for i in range(n_runs)]
t2 = time.time()
sys.stdout = _stdout
# print(kwargs)
# print(n_runs, 'runs in', t2 - t1, 'average = ', (t2 - t1) / n_runs)
experiment['avg_turns'] = round(
sum(r.turncount for r in runs) / len(runs), 1)
# print('Average Turns: ', avg_turns)
wins = {}
for r in runs:
wins.setdefault(str(r.winner), 0)
wins[str(r.winner)] += 1
# print('Wins:', wins)
experiment['wins'] = wins
return experiment
def run_full_compare_experiment(n_runs=100, **kwargs):
players = [SimpleConstraintAIPlayer, BetterConstraintAIPlayer,
RevealLessInfoAIPlayer, RevealLessInfoBetterAIPlayer]
import sys
import time
results = {}
with open('test.log', 'w') as f:
_stdout = sys.stdout
sys.stdout = f
t1 = time.time()
for p1, p2 in itertools.permutations(players, 2):
aiplayer = itertools.cycle([p1, p2])
runs = [Clue(**kwargs, aiplayer=aiplayer).run() for i in range(n_runs)]
results.setdefault(p1.__name__ + 'p1', {})
results[p1.__name__ + 'p1'].setdefault(p2.__name__, 0)
num = 0
denom = 0
for r in runs:
if r.winner.player.__class__.__name__ == p1.__name__:
num += 1
else:
denom += 1
results[p1.__name__ + 'p1'][p2.__name__] = num / denom
t2 = time.time()
sys.stdout = _stdout
return results
if __name__ == '__main__':
# c = Clue(n_real_players=0)
# c.run()
onegoodplayer = itertools.cycle(
[BetterConstraintAIPlayer, SimpleConstraintAIPlayer, SimpleConstraintAIPlayer])
allgoodplayer = itertools.cycle([BetterConstraintAIPlayer])
nogoodplayer = itertools.cycle([SimpleConstraintAIPlayer])
# run_experiment(n_runs=1000, n_real_players=0,
# n_players=3, aiplayer=nogoodplayer)
# run_experiment(n_runs=1000, n_real_players=0,
# n_players=3, aiplayer=onegoodplayer)
# run_experiment(n_runs=1000, n_real_players=0,
# n_players=3, aiplayer=allgoodplayer)
# run_experiment(n_runs=100, n_real_players=0,
# n_players=4, aiplayer=onegoodplayer)
# run_experiment(n_runs=100, n_real_players=0,
# n_players=5, aiplayer=onegoodplayer)
# run_experiment(n_runs=100, n_real_players=0,
# n_players=6, aiplayer=onegoodplayer)
# ex = run_experiment(n_runs=100, n_players=3, n_real_players=0, aiplayer=itertools.cycle(
# [RevealLessInfoAIPlayer, SimpleConstraintAIPlayer, SimpleConstraintAIPlayer]))
# print(ex)
ex = run_full_compare_experiment(n_real_players=0, n_players=4)
print(ex)
import pandas as pd
df = | pd.DataFrame(ex) | pandas.DataFrame |
from __future__ import print_function
import baker
import logging
import core.io
from core.cascade import group_offsets
def truncate_data(x, y, qid, docno, k):
"""Truncate each ranked list down to at most k documents"""
import numpy as np
idx = np.concatenate([np.arange(a, min(a + k, b)) for a, b in group_offsets(qid)])
new_docno = docno[idx] if docno is not None else None
return x[idx], y[idx], qid[idx], new_docno
@baker.command
def make_npz(input_file, npz_file, k=0):
"""Convert input data (SVMLight or .npz) into .npz format"""
if input_file.endswith('.npz'):
x, y, qid, docno = core.io.load_npz(input_file)
else:
x, y, qid, docno = core.io.load_svmlight_file(input_file)
# eliminate explicit zeros
x.eliminate_zeros()
# truncate data as necessary
if k:
x, y, qid, docno = truncate_data(x, y, qid, docno, k)
core.io.save_npz(npz_file, x, y, qid, docno)
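# Illustrative invocation (assuming the module is dispatched through baker.run(),
# which is not shown in this excerpt; the script name is hypothetical):
#
#     python prepare_data.py make_npz run.txt run.npz --k 10
#
# This converts the SVMLight file and truncates every ranked list to its top 10
# documents.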
@baker.command
def merge_npz(*npz_file):
"""Merge multiple npz files (*** EXPERIMENTAL ***)"""
import numpy as np
import scipy.sparse
dest_npz_file = npz_file[-1] # the last filename is the destination
npz_file = npz_file[:-1]
x_list, y_list, qid_list, docno_list = [], [], [], []
for fname in npz_file:
x, y, qid, docno = core.io.load_npz(fname)
x.eliminate_zeros() # eliminate explicit zeros
print(fname, x.shape, y.shape, qid.shape, docno.shape,
'fid:[{}, {}]'.format(x.indices.min(), x.indices.max()))
x_list.append(x)
y_list.append(y)
qid_list.append(qid)
docno_list.append(docno)
n_features = max(x.shape[1] for x in x_list)
for i in range(len(x_list)):
if x_list[i].shape[1] == n_features:
continue
new_shape = (x_list[i].shape[0], n_features)
x_list[i] = scipy.sparse.csr_matrix((x_list[i].data, x_list[i].indices, x_list[i].indptr),
shape=new_shape)
x_new = scipy.sparse.vstack(x_list)
print('x', type(x_new), x_new.shape, 'fid:[{}, {}]'.format(x_new.indices.min(), x_new.indices.max()))
y_new = np.concatenate(y_list)
print('y', type(y_new), y_new.shape)
qid_new = np.concatenate(qid_list)
print('qid', type(qid_new), qid_new.shape)
docno_new = np.concatenate(docno_list)
print('docno', type(docno_new), docno_new.shape)
core.io.save_npz(dest_npz_file, x_new, y_new, qid_new, docno_new)
@baker.command
def show_npz_info(*npz_file):
import numpy as np
for fname in npz_file:
print('filename', fname)
x, y, qid, docno = core.io.load_npz(fname)
if docno is not None:
print('x', x.shape, 'y', y.shape, 'qid', qid.shape, 'docno', docno.shape)
else:
print('x', x.shape, 'y', y.shape, 'qid', qid.shape, 'docno', None)
print('labels:', {int(k): v for k, v in zip(*map(list, np.unique(y, return_counts=True)))})
unique_qid = np.unique(qid)
print('qid (unique):', unique_qid.size)
print(unique_qid)
print()
@baker.command
def make_qrels(data_file, qrels_file):
"""Create qrels from an svmlight or npz file."""
with open(qrels_file, 'wb') as out:
if data_file.endswith('.npz'):
_, y, qid, docno = core.io.load_npz(data_file)
for a, b in group_offsets(qid):
if docno is None:
docno_string = ['%s.%d' % (qid[a], i) for i in range(1, b - a + 1)]
else:
docno_string = docno[a:b]
for d, rel in zip(docno_string, y[a:b]):
out.write('%s 0 %s %d\n' % (qid[a], d, rel))
else:
for qid, docno, rel in core.io.parse_svmlight_into_qrels(data_file):
out.write('%s 0 %s %d\n' % (qid, docno, rel))
@baker.command
def make_svm(*csv_file):
"""Convert CSV files into SVMLight format
Format: <label>,<query id>,<docno>,f1,f2,...,fn
"""
import itertools
import pandas as pd
fid = itertools.count(1)
frames = []
for fname in csv_file:
df = | pd.read_csv(fname, sep=',', header=None) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Apode Project (https://github.com/mchalela/apode).
# Copyright (c) 2020, <NAME> and <NAME>
# License: MIT
# Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt
from apode import datasets
from apode.basic import ApodeData
import numpy as np
import pandas as pd
import pytest
# =============================================================================
# TESTS COMMON
# =============================================================================
def test_default_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("headcount", pline=pline)
method_result = ad.poverty.headcount(pline=pline)
assert call_result == method_result
def test_invalid():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(AttributeError):
ad.poverty("foo")
def test_get_pline_none():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
# pline is None
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.headcount(pline=None) == ad.poverty.headcount(
pline=pline
)
def test_get_pline_factor():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# factor < 0:
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline=pline, factor=-3)
with pytest.raises(ValueError):
ad.poverty.chakravarty(pline=pline, factor=-3)
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline=None, factor=-3)
with pytest.raises(ValueError):
ad.poverty.chakravarty(pline=None, factor=-3)
def test_get_pline_median():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
factor = 0.3
pline = factor * np.median(ad.data.values)
assert ad.poverty.headcount(
pline="median", factor=factor
) == ad.poverty.headcount(pline=pline)
def test_get_pline_mean():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
factor = 0.3
pline = factor * np.mean(ad.data.values)
assert ad.poverty.headcount(
pline="mean", factor=factor
) == ad.poverty.headcount(pline=pline)
def test_get_pline_quantile():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
# pline = "quantile"
q = 0.3
factor = 0.3
pline = factor * np.quantile(ad.data.values, q)
assert ad.poverty.chakravarty(
pline="quantile", factor=factor, q=q
) == ad.poverty.chakravarty(pline=pline)
assert ad.poverty.hagenaars(
pline="quantile", factor=factor, q=q
) == ad.poverty.hagenaars(pline=pline)
# pline = "quantile", q out of range
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline="quantile", q=1.2)
with pytest.raises(ValueError):
ad.poverty.hagenaars(pline="quantile", q=-0.2)
# =============================================================================
# TESTS HEADCOUNT
# =============================================================================
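# Background for the tests below (standard FGT(0) definition, not a statement
# about apode internals): given a poverty line z and incomes y_1..y_n, the
# headcount ratio is H = q / n, where q is the number of incomes below z.
# For example, incomes [1, 2, 3, 4] with z = 2.5 give H = 2 / 4 = 0.5.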
def test_headcount_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.headcount(pline=pline) == 0.27
def test_headcount_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("headcount", pline=pline) == 0.27
def test_headcount_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("headcount", pline=pline)
method_result = ad.poverty.headcount(pline=pline)
assert call_result == method_result
def test_headcount_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("headcount", pline=-1)
with pytest.raises(ValueError):
ad.poverty("headcount", pline=0)
def test_headcount_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=100, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("headcount", pline=pline_min) == 0
assert ad.poverty("headcount", pline=pline_max) == 1
def test_headcount_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="headcount", pline=pline) == ad2.poverty(
method="headcount", pline=pline
)
def test_headcount_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("headcount", pline=pline) == ad2.poverty(
"headcount", pline=pline
)
def test_headcount_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("headcount", pline=pline) == ad2.poverty(
"headcount", pline=pline * k
)
# =============================================================================
# TESTS GAP
# =============================================================================
def test_gap_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.gap(pline=pline) == 0.13715275200855706
def test_gap_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("gap", pline=pline) == 0.13715275200855706
def test_gap_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("gap", pline=pline)
method_result = ad.poverty.gap(pline=pline)
assert call_result == method_result
def test_gap_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("gap", pline=-1)
def test_gap_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("gap", pline=pline_min) == 0
assert ad.poverty("gap", pline=pline_max) <= 1
def test_gap_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="gap", pline=pline) == ad2.poverty(
method="gap", pline=pline
)
def test_gap_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("gap", pline=pline), ad2.poverty("gap", pline=pline)
)
def test_gap_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("gap", pline=pline) == ad2.poverty(
"gap", pline=pline * k
)
# =============================================================================
# TESTS SEVERITY
# =============================================================================
def test_severity_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.severity(pline=pline) == 0.0925444945807559
def test_severity_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("severity", pline=pline) == 0.0925444945807559
def test_severity_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("severity", pline=pline)
method_result = ad.poverty.severity(pline=pline)
assert call_result == method_result
def test_severity_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("severity", pline=-1)
def test_severity_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("severity", pline=pline_min) == 0
assert ad.poverty("severity", pline=pline_max) <= 1
def test_severity_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="severity", pline=pline) == ad2.poverty(
method="severity", pline=pline
)
def test_severity_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("severity", pline=pline),
ad2.poverty("severity", pline=pline),
)
def test_severity_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("severity", pline=pline) == ad2.poverty(
"severity", pline=pline * k
)
# =============================================================================
# TESTS FGT
# =============================================================================
def test_fgt_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.fgt(pline=pline) == 0.27
def test_fgt_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("fgt", pline=pline) == 0.27
def test_fgt_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("fgt", pline=pline)
method_result = ad.poverty.fgt(pline=pline)
assert call_result == method_result
def test_fgt_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("fgt", pline=-1)
with pytest.raises(ValueError):
ad.poverty("fgt", pline=0)
def test_fgt_valid_alpha():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty.fgt(pline=1, alpha=-2)
def test_fgt_alpha_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
assert ad.poverty.fgt(pline=pline, alpha=1) == 0.26003924372489007
assert ad.poverty.fgt(pline=pline, alpha=0) == 0.4766666666666667
assert ad.poverty.fgt(pline=pline, alpha=10) == 0.049479474144909996
def test_fgt_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("fgt", pline=pline_min) == 0
assert ad.poverty("fgt", pline=pline_max) <= 1
def test_fgt_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="fgt", pline=pline) == ad2.poverty(
method="fgt", pline=pline
)
def test_fgt_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("fgt", pline=pline) == ad2.poverty("fgt", pline=pline)
def test_fgt_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("fgt", pline=pline) == ad2.poverty(
"fgt", pline=pline * k
)
# =============================================================================
# TESTS SEN
# =============================================================================
def test_sen_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.sen(pline=pline) == 0.1826297337125855
def test_sen_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("sen", pline=pline) == 0.1826297337125855
def test_sen_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("sen", pline=pline)
method_result = ad.poverty.sen(pline=pline)
assert call_result == method_result
def test_sen_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("sen", pline=-1)
with pytest.raises(ValueError):
ad.poverty("sen", pline=0)
def test_sen_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("sen", pline=pline_min) == 0
assert ad.poverty("sen", pline=pline_max) <= 1
def test_sen_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="sen", pline=pline) == ad2.poverty(
method="sen", pline=pline
)
def test_sen_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("sen", pline=pline) == ad2.poverty(
"sen", pline=pline * k
)
# =============================================================================
# TESTS SST
# =============================================================================
def test_sst_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.sst(pline=pline) == 0.24950968072455512
def test_sst_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("sst", pline=pline) == 0.24950968072455512
def test_sst_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("sst", pline=pline)
method_result = ad.poverty.sst(pline=pline)
assert call_result == method_result
def test_sst_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("sst", pline=-1)
with pytest.raises(ValueError):
ad.poverty("sst", pline=0)
# @pytest.mark.xfail
def test_sst_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("sst", pline=pline_min) == 0
assert ad.poverty("sst", pline=pline_max) <= 1 # CHECK, fails
def test_sst_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="sst", pline=pline) == ad2.poverty(
method="sst", pline=pline
)
def test_sst_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("sst", pline=pline) == ad2.poverty(
"sst", pline=pline * k
)
# =============================================================================
# TESTS WATTS
# =============================================================================
def test_watts_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.watts(pline=pline) == 0.2724322042654472
def test_watts_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty("watts", pline=pline) == 0.2724322042654472
def test_watts_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("watts", pline=pline)
method_result = ad.poverty.watts(pline=pline)
assert call_result == method_result
def test_watts_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("watts", pline=-1)
with pytest.raises(ValueError):
ad.poverty("watts", pline=0)
def test_watts_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
assert ad.poverty("watts", pline=pline_min) == 0
def test_watts_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="watts", pline=pline) == ad2.poverty(
method="watts", pline=pline
)
def test_watts_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("watts", pline=pline), ad2.poverty("watts", pline=pline)
)
def test_watts_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("watts", pline=pline) == ad2.poverty(
"watts", pline=pline * k
)
# =============================================================================
# TESTS CUH
# =============================================================================
def test_cuh_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
assert ad.poverty.cuh(pline=pline) == 0.18341653809400216
def test_cuh_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
    assert ad.poverty("cuh", pline=pline) == 0.18341653809400216
def test_cuh_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("cuh", pline=pline)
method_result = ad.poverty.cuh(pline=pline)
assert call_result == method_result
def test_cuh_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("cuh", pline=-1)
with pytest.raises(ValueError):
ad.poverty("cuh", pline=0)
def test_cuh_valid_alpha():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
with pytest.raises(ValueError):
ad.poverty(method="cuh", pline=pline, alpha=-2)
with pytest.raises(ValueError):
ad.poverty(method="cuh", pline=pline, alpha=2)
def test_cuh_alpha_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
assert (
ad.poverty(method="cuh", pline=pline, alpha=0.4) == 0.3739168025918481
)
assert (
ad.poverty(method="cuh", pline=pline, alpha=0) == 0.14377616581364483
)
def test_cuh_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=100, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("cuh", pline=pline_min) == 0 # CHECK, Fails
assert ad.poverty("cuh", pline=pline_max) <= 1
def test_cuh_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="cuh", pline=pline) == ad2.poverty(
method="cuh", pline=pline
)
def test_cuh_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("cuh", pline=pline) == ad2.poverty(
"cuh", pline=pline * k
)
# =============================================================================
# TESTS TAKAYAMA
# =============================================================================
def test_takayama_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty.takayama(pline=pline) == 0.13021647687646376
np.testing.assert_allclose(
ad.poverty.takayama(pline=pline),
0.13021647687646376,
)
def test_takayama_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty("takayama", pline=pline) == 0.13021647687646376
np.testing.assert_allclose(
ad.poverty("takayama", pline=pline),
0.13021647687646376,
)
def test_takayama_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("takayama", pline=pline)
method_result = ad.poverty.takayama(pline=pline)
assert call_result == method_result
def test_takayama_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("takayama", pline=-1)
with pytest.raises(ValueError):
ad.poverty("takayama", pline=0)
def test_takayama_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("takayama", pline=pline_min) == 0
assert ad.poverty("takayama", pline=pline_max) <= 1 # CHE¶CK, fails
def test_takayama_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="takayama", pline=pline) == ad2.poverty(
method="takayama", pline=pline
)
def test_takayama_replication():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = k * ad.data["x"].tolist()
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
np.testing.assert_allclose(
ad.poverty("takayama", pline=pline),
ad2.poverty("takayama", pline=pline),
)
def test_takayama_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty("takayama", pline=pline) == ad2.poverty(
"takayama", pline=pline * k
)
def test_takayama_avoid_zero_div_error():
# u = 0
df = pd.DataFrame({"x": np.zeros(10)})
ad = ApodeData(df, income_column="x")
pline = 0.2
assert ad.poverty.takayama(pline=pline) == 0
# n = 0
# df = pd.DataFrame({"x": []})
# ad = ApodeData(df, income_column="x")
# assert ad.poverty.takayama(pline=pline) == 0
# =============================================================================
# TESTS KAKWANI
# =============================================================================
def test_kakwani_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty.kakwani(pline=pline) == 0.2027705302170293
np.testing.assert_allclose(
ad.poverty.kakwani(pline=pline), 0.2027705302170293
)
def test_kakwani_call():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
# assert ad.poverty("kakwani", pline=pline) == 0.2027705302170293
np.testing.assert_allclose(
ad.poverty("kakwani", pline=pline), 0.2027705302170293
)
def test_kakwani_call_equal_method():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.5 * np.median(ad.data.values)
call_result = ad.poverty("kakwani", pline=pline)
method_result = ad.poverty.kakwani(pline=pline)
assert call_result == method_result
def test_kakwani_valid_pline():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(ValueError):
ad.poverty("kakwani", pline=-1)
with pytest.raises(ValueError):
ad.poverty("kakwani", pline=0)
def test_kakwani_extreme_values():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline_min = np.min(ad.data.values) / 2
pline_max = np.max(ad.data.values) + 1
assert ad.poverty("kakwani", pline=pline_min) == 0
assert ad.poverty("kakwani", pline=pline_max) <= 1
def test_kakwani_symmetry():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
np.random.shuffle(y)
df2 = pd.DataFrame({"x": y})
ad2 = ApodeData(df2, income_column="x")
assert ad.poverty(method="kakwani", pline=pline) == ad2.poverty(
method="kakwani", pline=pline
)
def test_kakwani_homogeneity():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
k = 2 # factor
pline = np.mean(ad.data.values)
y = ad.data["x"].tolist()
y = [yi * k for yi in y]
    df2 = pd.DataFrame({"x": y})
    ad2 = ApodeData(df2, income_column="x")
    assert ad.poverty("kakwani", pline=pline) == ad2.poverty(
        "kakwani", pline=pline * k
    )
from __future__ import absolute_import
from __future__ import print_function
import pandas as pd
import numpy as np
import itertools
import morphs
import click
import sklearn
import sklearn.linear_model
from sklearn.linear_model import LogisticRegression
from joblib import Parallel, delayed
from six.moves import range
from six.moves import zip
def hold_one_out_neurometric_fit_dist(
representations,
labels,
behavior_subj,
psychometric_params,
shuffle_count=1024,
parallel=True,
n_jobs=morphs.parallel.N_JOBS,
):
"""
fits behavioral psychometric curves using the representation in a hold one out manner
Parameters
-----
representations : np.array
size = (num_data_points, num_dimensions)
labels : iterable of string labels or np array of dtype='U5'
behavior_subj : str
psychometric_params : morphs.load.psychometric_params()
shuffle_count : int, how many times to shuffle
parallel : boolean, whether to parallelize
n_jobs : int > 0, number of parallel jobs to run
Returns
-----
df containing neurometric fits of actual data and shuffled morph dim labeled data
"""
label_df = make_label_df(labels, behavior_subj, psychometric_params)
behavior_df = make_behavior_df(behavior_subj, psychometric_params)
if parallel and n_jobs > 1:
all_samples = Parallel(n_jobs=n_jobs)(
delayed(_calc_samples)(
representations, label_df, behavior_df, idx, shuffle=shuffle
)
for idx, shuffle in [(i, i != 0) for i in range(shuffle_count + 1)]
)
else:
all_samples = [
_calc_samples(representations, label_df, behavior_df, idx, shuffle=shuffle)
for idx, shuffle in [(i, i != 0) for i in range(shuffle_count + 1)]
]
all_samples_df = pd.concat(all_samples, ignore_index=True)
all_samples_df["subj"] = behavior_subj
return all_samples_df
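# Hedged usage sketch (illustrative only): a typical call for a single subject, assuming
# `representations` is an (n_stimuli, n_dims) array aligned row-for-row with `labels`
# and that morphs.load.psychometric_params() returns the behavioral fits referenced in
# the docstring above. The helper name below is hypothetical.
def _example_fit_one_subject(representations, labels):
    psychometric_params = morphs.load.psychometric_params()
    subj = next(iter(psychometric_params))
    samples_df = hold_one_out_neurometric_fit_dist(
        representations,
        labels,
        subj,
        psychometric_params,
        shuffle_count=128,
        parallel=False,
    )
    # Compare the real fit (shuffled == False) against the shuffled null distribution.
    return samples_df.groupby("shuffled")["errors"].describe()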
def hold_one_out_neurometric_fit_dist_all_subj(
representations,
labels,
psychometric_params,
shuffle_count=1024,
parallel=True,
n_jobs=morphs.parallel.N_JOBS,
):
"""Runs hold_one_out_neurometric_fit_dist on all subjects"""
all_samples = []
for subj in psychometric_params:
print(subj)
all_samples.append(
hold_one_out_neurometric_fit_dist(
representations,
labels,
subj,
psychometric_params,
shuffle_count=shuffle_count,
parallel=parallel,
n_jobs=n_jobs,
)
)
return pd.concat(all_samples)
def make_label_df(labels, behavior_subj, psychometric_params):
"""Turns labels into a parsed df"""
label_df = pd.DataFrame(data={"stim_id": labels})
morphs.data.parse.stim_id(label_df)
label_df["behave_data"] = False
for dim, dim_group in label_df.groupby("morph_dim"):
if dim in psychometric_params[behavior_subj]:
label_df.loc[dim_group.index, "behave_data"] = True
morphs.data.parse.effective_morph(label_df, behavior_subj)
return label_df
def make_behavior_df(behavior_subj, psychometric_params):
"""Generates behaviorally determined psychometric functions for the given subj in df form"""
morph_dims, morph_poss = list(
zip(
*itertools.product(
list(psychometric_params[behavior_subj].keys()), np.arange(1, 129)
)
)
)
behavior_df = pd.DataFrame(data={"morph_dim": morph_dims, "morph_pos": morph_poss})
behavior_df["lesser_dim"] = behavior_df["morph_dim"].str[0]
behavior_df["greater_dim"] = behavior_df["morph_dim"].str[1]
morphs.data.parse.effective_morph(behavior_df, behavior_subj)
for dim, dim_group in behavior_df.groupby("morph_dim"):
psyc = morphs.logistic.normalized_four_param_logistic(
psychometric_params[behavior_subj][dim]
)
behavior_df.loc[dim_group.index, "p_greater"] = dim_group["morph_pos"].apply(
psyc
)
behavior_df["p_lesser"] = 1.0 - behavior_df["p_greater"]
behavior_df["p_left"], behavior_df["p_right"] = (
behavior_df["p_lesser"],
behavior_df["p_greater"],
)
behavior_df.loc[behavior_df["inverted"], "p_right"] = behavior_df.loc[
behavior_df["inverted"], "p_lesser"
]
behavior_df.loc[behavior_df["inverted"], "p_left"] = behavior_df.loc[
behavior_df["inverted"], "p_greater"
]
return behavior_df
def _calc_samples(representations, label_df, behavior_df, idx, shuffle=False, tol=1e-4):
error_list, dim_list = fit_held_outs(
_merge_df(label_df, behavior_df, shuffle=shuffle), representations, tol=tol
)
return pd.DataFrame(
data={
"errors": error_list,
"held_out_dim": dim_list,
"shuffle_index": idx,
"shuffled": shuffle,
}
)
def _merge_df(label_df, behavior_df, shuffle=False):
shuffle_effective_dim(label_df, shuffle=shuffle)
shuffle_effective_dim(behavior_df, shuffle=False)
return pd.merge(
label_df,
behavior_df[["shuffled_dim", "effective_pos", "p_left", "p_right"]],
on=["shuffled_dim", "effective_pos"],
how="left",
validate="m:1",
)
def shuffle_effective_dim(df, shuffle=False):
"""Generates and may shuffle the effective dimension"""
if shuffle:
behave_dims = df[df["behave_data"]]["effective_dim"].unique()
non_behave_dims = set(df["effective_dim"].unique()) - set(behave_dims)
dim_map = {
dim: target
for dim, target in zip(behave_dims, np.random.permutation(behave_dims))
}
dim_map.update({dim: dim for dim in non_behave_dims})
df["shuffled_dim"] = df["effective_dim"].map(dim_map)
else:
df["shuffled_dim"] = df["effective_dim"]
def fit_held_outs(merged_df, representations, accum="sse", tol=1e-4):
"""Fits the neurometric functions and accumulates them"""
mbdf = merged_df[merged_df["behave_data"]]
error_list = []
dim_list = []
for held_out_dim in mbdf["shuffled_dim"].unique():
training_df = mbdf[mbdf["shuffled_dim"] != held_out_dim]
held_out_df = mbdf[mbdf["shuffled_dim"] == held_out_dim]
train_x = np.concatenate(
[
representations[training_df.index, :],
representations[training_df.index, :],
]
)
train_y = np.repeat([0, 1], len(training_df))
train_weights = np.concatenate([training_df["p_left"], training_df["p_right"]])
test_x = representations[held_out_df.index, :]
test_y = held_out_df["p_right"]
model = LogisticRegression(penalty="l2", tol=tol, warm_start=True).fit(
train_x, train_y, sample_weight=train_weights
)
predicted_values = model.predict_proba(test_x)[:, 1]
dim_list.append(held_out_dim)
if accum == "df":
fit_df = held_out_df[["stim_id", "p_right"]].copy()
fit_df["predicted"] = predicted_values
error_list.append(fit_df)
elif accum == "mse":
error_list.append(np.square(predicted_values - test_y).mean())
elif accum == "sse":
error_list.append(np.square(predicted_values - test_y).sum())
elif accum == "sigmoid fit":
raise NotImplementedError
else:
raise Exception("invalid accum option")
return error_list, dim_list
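# Sketch of the soft-label trick used in fit_held_outs above: each training stimulus is
# duplicated once as class 0 and once as class 1, and the behavioral probabilities
# p_left / p_right become sample weights, so the logistic regression is fit to graded
# psychometric targets rather than hard labels. Standalone toy version with made-up
# shapes (illustrative only).
def _soft_label_logistic_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    p_right = rng.rand(20)  # graded targets in [0, 1]
    train_x = np.concatenate([X, X])
    train_y = np.repeat([0, 1], len(X))
    weights = np.concatenate([1.0 - p_right, p_right])
    model = LogisticRegression(solver="liblinear").fit(
        train_x, train_y, sample_weight=weights
    )
    return model.predict_proba(X)[:, 1]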
def gen_held_out_df(merged_df, representations, melt=False):
held_out_df = pd.concat(fit_held_outs(merged_df, representations, accum="df")[0])
if melt:
held_out_df = pd.melt(
held_out_df,
id_vars=["stim_id"],
value_vars=["p_right", "predicted"],
var_name="legend",
value_name="p_right",
)
morphs.data.parse.stim_id(held_out_df)
return held_out_df
def logistic_dim_discriminants(X, labels):
"""Returns a dictionary containing the logistic discriminating axis for the endpoints of each morph dimension"""
dim_discriminants = {}
labels = pd.Series(labels)
morph_dims = labels.str[:2].unique()
stim_ids, _ = morphs.data.parse.separate_endpoints(labels)
    motif_map = pd.DataFrame(stim_ids, columns=["motif"])
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import namedtuple
from io import StringIO
from typing import Dict, Mapping, Optional, Union
import polyaxon_sdk
from dateutil import parser as dt_parser
from marshmallow import ValidationError, fields, pre_load, validate, validates_schema
from polyaxon.parser import parser
from polyaxon.polyboard.artifacts.kinds import V1ArtifactKind
from polyaxon.schemas.base import BaseConfig, BaseSchema
from polyaxon.utils.np_utils import sanitize_np_types
from polyaxon.utils.tz_utils import now
class EventImageSchema(BaseSchema):
height = fields.Int(allow_none=True)
width = fields.Int(allow_none=True)
colorspace = fields.Int(allow_none=True)
path = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return V1EventImage
class V1EventImage(BaseConfig, polyaxon_sdk.V1EventImage):
IDENTIFIER = "image"
SCHEMA = EventImageSchema
REDUCED_ATTRIBUTES = ["height", "width", "colorspace", "path"]
class EventVideoSchema(BaseSchema):
height = fields.Int(allow_none=True)
width = fields.Int(allow_none=True)
colorspace = fields.Int(allow_none=True)
path = fields.Str(allow_none=True)
content_type = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return V1EventVideo
class V1EventVideo(BaseConfig, polyaxon_sdk.V1EventVideo):
IDENTIFIER = "video"
    SCHEMA = EventVideoSchema
REDUCED_ATTRIBUTES = ["height", "width", "colorspace", "path", "content_type"]
class EventDataframeSchema(BaseSchema):
path = fields.Str(allow_none=True)
content_type = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return V1EventDataframe
class V1EventDataframe(BaseConfig, polyaxon_sdk.V1EventDataframe):
IDENTIFIER = "dataframe"
SCHEMA = EventDataframeSchema
REDUCED_ATTRIBUTES = ["path", "content_type"]
class EventHistogramSchema(BaseSchema):
values = fields.List(fields.Float(), allow_none=True)
counts = fields.List(fields.Float(), allow_none=True)
@staticmethod
def schema_config():
return V1EventHistogram
class V1EventHistogram(BaseConfig, polyaxon_sdk.V1EventHistogram):
IDENTIFIER = "histogram"
SCHEMA = EventHistogramSchema
REDUCED_ATTRIBUTES = ["values", "counts"]
class EventAudioSchema(BaseSchema):
sample_rate = fields.Float(allow_none=True)
num_channels = fields.Int(allow_none=True)
length_frames = fields.Int(allow_none=True)
path = fields.Str(allow_none=True)
content_type = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return V1EventAudio
class V1EventAudio(BaseConfig, polyaxon_sdk.V1EventAudio):
IDENTIFIER = "audio"
SCHEMA = EventAudioSchema
REDUCED_ATTRIBUTES = [
"sample_rate",
"num_channels",
"length_frames",
"path",
"content_type",
]
class V1EventChartKind(polyaxon_sdk.V1EventChartKind):
pass
class EventChartSchema(BaseSchema):
kind = fields.Str(
allow_none=True, validate=validate.OneOf(V1EventChartKind.allowable_values)
)
figure = fields.Dict(allow_none=True)
@staticmethod
def schema_config():
return V1EventChart
class V1EventChart(BaseConfig, polyaxon_sdk.V1EventChart):
IDENTIFIER = "chart"
SCHEMA = EventChartSchema
REDUCED_ATTRIBUTES = ["kind", "figure"]
def to_dict(self, humanize_values=False, unknown=None, dump=False):
if self.kind == V1EventChartKind.PLOTLY:
            import plotly.utils
obj = self.obj_to_dict(
self, humanize_values=humanize_values, unknown=unknown
)
return json.dumps(obj, cls=plotly.utils.PlotlyJSONEncoder)
# Resume normal serialization
return super().to_dict(humanize_values, unknown, dump)
class V1EventCurveKind(polyaxon_sdk.V1EventCurveKind):
pass
class EventCurveSchema(BaseSchema):
kind = fields.Str(
allow_none=True, validate=validate.OneOf(V1EventCurveKind.allowable_values)
)
x = fields.List(fields.Float(), allow_none=True)
y = fields.List(fields.Float(), allow_none=True)
annotation = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return V1EventCurve
class V1EventCurve(BaseConfig, polyaxon_sdk.V1EventCurve):
IDENTIFIER = "curve"
SCHEMA = EventCurveSchema
REDUCED_ATTRIBUTES = ["kind", "x", "y", "annotation"]
class EventArtifactSchema(BaseSchema):
kind = fields.Str(
allow_none=True, validate=validate.OneOf(V1ArtifactKind.allowable_values)
)
path = fields.Str(allow_none=True)
@staticmethod
def schema_config():
return V1EventArtifact
class V1EventArtifact(BaseConfig, polyaxon_sdk.V1EventArtifact):
IDENTIFIER = "artifact"
SCHEMA = EventArtifactSchema
REDUCED_ATTRIBUTES = ["kind", "path"]
class EventModelSchema(BaseSchema):
framework = fields.Str(allow_none=True)
path = fields.Str(allow_none=True)
spec = fields.Raw(allow_none=True)
@staticmethod
def schema_config():
return V1EventModel
class V1EventModel(BaseConfig, polyaxon_sdk.V1EventModel):
IDENTIFIER = "artifact"
SCHEMA = EventModelSchema
REDUCED_ATTRIBUTES = ["framework", "path", "spec"]
class EventSchema(BaseSchema):
timestamp = fields.DateTime(allow_none=True)
step = fields.Int(allow_none=True)
metric = fields.Float(allow_none=True)
image = fields.Nested(EventImageSchema, allow_none=True)
histogram = fields.Nested(EventHistogramSchema, allow_none=True)
audio = fields.Nested(EventAudioSchema, allow_none=True)
video = fields.Nested(EventVideoSchema, allow_none=True)
html = fields.Str(allow_none=True)
text = fields.Str(allow_none=True)
chart = fields.Nested(EventChartSchema, allow_none=True)
curve = fields.Nested(EventCurveSchema, allow_none=True)
artifact = fields.Nested(EventArtifactSchema, allow_none=True)
model = fields.Nested(EventModelSchema, allow_none=True)
dataframe = fields.Nested(EventDataframeSchema, allow_none=True)
@staticmethod
def schema_config():
return V1Event
@pre_load
def pre_validate(self, data, **kwargs):
if data.get("image") is not None:
data["image"] = parser.get_dict(
key="image",
value=data["image"],
)
if data.get("histogram") is not None:
data["histogram"] = parser.get_dict(
key="histogram",
value=data["histogram"],
)
if data.get("audio") is not None:
data["audio"] = parser.get_dict(
key="audio",
value=data["audio"],
)
if data.get("video") is not None:
data["video"] = parser.get_dict(
key="video",
value=data["video"],
)
if data.get("chart") is not None:
data["chart"] = parser.get_dict(
key="chart",
value=data["chart"],
)
if data.get("curve") is not None:
data["curve"] = parser.get_dict(
key="curve",
value=data["curve"],
)
if data.get("artifact") is not None:
data["artifact"] = parser.get_dict(
key="artifact",
value=data["artifact"],
)
if data.get("model") is not None:
data["model"] = parser.get_dict(
key="model",
value=data["model"],
)
if data.get("dataframe") is not None:
data["dataframe"] = parser.get_dict(
key="dataframe",
value=data["dataframe"],
)
return data
@validates_schema
def validate_event(self, values, **kwargs):
count = 0
def increment(c):
c += 1
if c > 1:
raise ValidationError(
"An event should have one and only one primitive, found {}.".format(
c
)
)
return c
if values.get("metric") is not None:
count = increment(count)
if values.get("image") is not None:
count = increment(count)
if values.get("histogram") is not None:
count = increment(count)
if values.get("audio") is not None:
count = increment(count)
if values.get("video") is not None:
count = increment(count)
if values.get("html") is not None:
count = increment(count)
if values.get("text") is not None:
count = increment(count)
if values.get("chart") is not None:
count = increment(count)
if values.get("curve") is not None:
count = increment(count)
if values.get("artifact") is not None:
count = increment(count)
if values.get("model") is not None:
count = increment(count)
if values.get("dataframe") is not None:
count = increment(count)
if count != 1:
raise ValidationError(
"An event should have one and only one primitive, found {}.".format(
count
)
)
class V1Event(BaseConfig, polyaxon_sdk.V1Event):
SEPARATOR = "|"
IDENTIFIER = "event"
SCHEMA = EventSchema
REDUCED_ATTRIBUTES = [
"metric",
"image",
"histogram",
"audio",
"video",
"html",
"text",
"chart",
"curve",
"artifact",
"model",
"dataframe",
]
@classmethod
def make(
cls,
step: int = None,
timestamp=None,
metric: float = None,
image: V1EventImage = None,
histogram: V1EventHistogram = None,
audio: V1EventAudio = None,
video: V1EventVideo = None,
html: str = None,
text: str = None,
chart: V1EventChart = None,
curve: V1EventCurve = None,
artifact: V1EventArtifact = None,
model: V1EventModel = None,
dataframe: V1EventDataframe = None,
) -> "V1Event":
if isinstance(timestamp, str):
try:
timestamp = dt_parser.parse(timestamp)
except Exception as e:
raise ValidationError("Received an invalid timestamp") from e
return cls(
timestamp=timestamp if timestamp else now(tzinfo=True),
step=step,
metric=metric,
image=image,
histogram=histogram,
audio=audio,
video=video,
html=html,
text=text,
chart=chart,
curve=curve,
artifact=artifact,
model=model,
dataframe=dataframe,
)
def get_value(self, dump=True):
if self.metric is not None:
return str(self.metric) if dump else self.metric
if self.image is not None:
return self.image.to_dict(dump=dump) if dump else self.image
if self.histogram is not None:
return self.histogram.to_dict(dump=dump) if dump else self.histogram
if self.audio is not None:
return self.audio.to_dict(dump=dump) if dump else self.audio
if self.video is not None:
return self.video.to_dict(dump=dump) if dump else self.video
if self.html is not None:
return self.html
if self.text is not None:
return self.text
if self.chart is not None:
return self.chart.to_dict(dump=dump) if dump else self.chart
if self.curve is not None:
return self.curve.to_dict(dump=dump) if dump else self.curve
if self.artifact is not None:
return self.artifact.to_dict(dump=dump) if dump else self.artifact
if self.model is not None:
return self.model.to_dict(dump=dump) if dump else self.model
if self.dataframe is not None:
return self.dataframe.to_dict(dump=dump) if dump else self.dataframe
def to_csv(self) -> str:
values = [
str(self.step) if self.step is not None else "",
str(self.timestamp) if self.timestamp is not None else "",
self.get_value(dump=True),
]
return self.SEPARATOR.join(values)
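# Hedged example (not from the original module): constructing a bare metric event and
# serializing it with the pipe-separated layout produced by to_csv above. The values
# are illustrative.
def _example_metric_event_csv():
    event = V1Event.make(step=3, metric=0.92)
    return event.to_csv()  # e.g. "3|<timestamp>|0.92"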
class V1Events:
ORIENT_CSV = "csv"
ORIENT_DICT = "dict"
def __init__(self, kind, name, df):
self.kind = kind
self.name = name
self.df = df
@staticmethod
def validate_csv(csv: str):
if csv and not os.path.exists(csv):
csv = StringIO(csv)
return csv
@classmethod
def read(
cls, kind: str, name: str, data: Union[str, Dict], parse_dates: bool = True
) -> "V1Events":
import pandas as pd
if isinstance(data, str):
csv = cls.validate_csv(data)
if parse_dates:
df = pd.read_csv(
csv,
sep=V1Event.SEPARATOR,
parse_dates=["timestamp"],
)
else:
df = pd.read_csv(
csv,
sep=V1Event.SEPARATOR,
)
elif isinstance(data, dict):
            df = pd.DataFrame.from_dict(data)
        return cls(name=name, kind=kind, df=df)
import json
import dml
import prov.model
import datetime
import uuid
import pandas as pd
class topCertifiedCompanies(dml.Algorithm):
contributor = 'ashwini_gdukuray_justini_utdesai'
reads = ['ashwini_gdukuray_justini_utdesai.topCompanies', 'ashwini_gdukuray_justini_utdesai.masterList']
writes = ['ashwini_gdukuray_justini_utdesai.topCertCompanies']
@staticmethod
def execute(trial=False):
'''Retrieve some data sets (not using the API here for the sake of simplicity).'''
startTime = datetime.datetime.now()
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('ashwini_gdukuray_justini_utdesai', 'ashwini_gdukuray_justini_utdesai')
masterList = repo['ashwini_gdukuray_justini_utdesai.masterList']
topCompanies = repo['ashwini_gdukuray_justini_utdesai.topCompanies']
masterListDF = pd.DataFrame(list(masterList.find()))
topCompaniesDF = pd.DataFrame(list(topCompanies.find()))
topCompaniesDF = topCompaniesDF.rename(index=str, columns={'Firm': 'Business Name'})
# create a more uniform ID
businessIDs = []
for index, row in topCompaniesDF.iterrows():
busName = row['Business Name']
cleanedText = busName.upper().strip().replace(' ','').replace('.','').replace(',','').replace('-','')
businessIDs.append(cleanedText)
        topCompaniesDF['B_ID'] = pd.Series(businessIDs, index=topCompaniesDF.index)
"""
This script is the entry point of a SageMaker TrainingJob for TFIDF
"""
from typing import Optional
from datetime import datetime
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from training import TrainerBase, LocalPaths
from training.preprocessing import Preprocessor
from training.utils_for_sagemaker import copy_artifacts_for_outputs_if_on_sagemaker
from emo_classifier.classifiers.tfidf import TfidfClassifier
from emo_classifier.metrics import TrainingMetrics
local_paths = LocalPaths()
class TfidfTrainer(TrainerBase):
def __init__(self, min_df: int = 10):
super().__init__()
self.classifier = TfidfClassifier(min_df=min_df)
self.training_metrics: Optional[TrainingMetrics] = None
def fit(self, X_train: pd.Series, Y_train: pd.DataFrame):
X_vectorized = self.classifier.tfidf_vectorizer.fit_transform(X_train)
plr = GridSearchCV(
OneVsRestClassifier(LogisticRegression(random_state=0, solver="liblinear", fit_intercept=False)),
param_grid={"estimator__C": [1.0, 10, 100]},
cv=5,
scoring="roc_auc_ovr",
return_train_score=True,
n_jobs=4,
)
plr.fit(X_vectorized, Y_train)
self.classifier.model = plr.best_estimator_
self.logger.info("Training finished")
cols = ["rank_test_score", "mean_test_score", "std_test_score", "mean_train_score", "mean_fit_time"]
        df_cv_results = pd.DataFrame(plr.cv_results_)
# Utility functions supporting experiments
import os
import warnings
import numpy as np
import pandas as pd
import netCDF4
# import xarray as xr
import subprocess
from datetime import datetime, timedelta
import collections
import itertools
import time
import sys
from filelock import FileLock
from functools import partial
from src.utils.general_util import printf, tic, toc
# Some global variables
# Forecast id to file name
forecast_id_to_fname_mapping = {
"subx_cfsv2-precip": "subx-cfsv2-precip-all_leads-8_periods_avg",
"subx_cfsv2-tmp2m": "subx-cfsv2-tmp2m-all_leads-8_periods_avg",
"subx_geos_v2p1-precip": "subx-geos_v2p1-precip-all_leads-4_periods_avg",
"subx_geos_v2p1-tmp2m": "subx-geos_v2p1-tmp2m-all_leads-4_periods_avg",
"subx_nesm-precip": "subx-nesm-precip-all_leads-4_periods_avg",
"subx_nesm-tmp2m": "subx-nesm-tmp2m-all_leads-4_periods_avg",
"subx_ccsm4-precip": "subx-ccsm4-precip-all_leads-4_periods_avg",
"subx_ccsm4-tmp2m": "subx-ccsm4-tmp2m-all_leads-4_periods_avg",
"subx_cfsv2-precip-us": "subx-cfsv2-precip-all_leads-8_periods_avg-us",
"subx_cfsv2-tmp2m-us": "subx-cfsv2-tmp2m-all_leads-8_periods_avg-us",
"subx_geos_v2p1-precip-us": "subx-geos_v2p1-precip-all_leads-4_periods_avg-us",
"subx_geos_v2p1-tmp2m-us": "subx-geos_v2p1-tmp2m-all_leads-4_periods_avg-us",
"subx_nesm-precip-us": "subx-nesm-precip-all_leads-4_periods_avg-us",
"subx_nesm-tmp2m-us": "subx-nesm-tmp2m-all_leads-4_periods_avg-us",
"subx_ccsm4-precip-us": "subx-ccsm4-precip-all_leads-4_periods_avg-us",
"subx_ccsm4-tmp2m-us": "subx-ccsm4-tmp2m-all_leads-4_periods_avg-us"
}
# Add nmme to mapping
nmme_ids = ["nmme{idx}-{var}-{target_horizon}".format(
idx=idx, var=var, target_horizon=target_horizon) for idx, var, target_horizon in itertools.product(
["", "0"], ["prate", "tmp2m"], ["34w", "56w"])
]
forecast_id_to_fname_mapping.update({k: k for k in nmme_ids})
def pandas2file(df_to_file_func, out_file):
"""Writes pandas dataframe or series to file, makes file writable by all,
creates parent directories with 777 permissions if they do not exist,
and changes file group ownership to sched_mit_hill
Args:
df_to_file_func - function that writes dataframe to file when invoked,
e.g., df.to_feather
out_file - file to which df should be written
"""
# Create parent directories with 777 permissions if they do not exist
dirname = os.path.dirname(out_file)
if dirname != '':
os.umask(0)
os.makedirs(dirname, exist_ok=True, mode=0o777)
printf("Saving to "+out_file)
with FileLock(out_file+'lock'):
tic()
df_to_file_func(out_file)
toc()
subprocess.call(f"rm {out_file}lock", shell=True)
subprocess.call("chmod a+w "+out_file, shell=True)
subprocess.call("chown $USER:sched_mit_hill "+out_file, shell=True)
def pandas2hdf(df, out_file, key="data", format="fixed"):
"""Write pandas dataframe or series to HDF; see pandas2file for other
side effects
Args:
df - pandas dataframe or series
out_file - file to which df should be written
key - key to use when writing to HDF
format - format argument of to_hdf
"""
pandas2file(partial(df.to_hdf, key=key, format=format, mode='w'), out_file)
def pandas2feather(df, out_file):
"""Write pandas dataframe or series to feather file;
see pandas2file for other side effects
Args:
df - pandas dataframe or series
out_file - file to which df should be written
"""
pandas2file(df.to_feather, out_file)
def pandas2csv(df, out_file, index=False, header=True):
"""Write pandas dataframe or series to CSV file;
see pandas2file for other side effects
Args:
df - pandas dataframe or series
out_file - file to which df should be written
index - write index to file?
header - write header row to file?
"""
pandas2file(partial(df.to_csv, index=index, header=header), out_file)
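# Minimal usage sketch for the pandas2* wrappers above (paths are illustrative). The
# design choice worth noting: the write itself is wrapped in functools.partial and
# handed to pandas2file, which creates parent directories, takes a file lock, and fixes
# permissions/ownership before invoking it.
def _example_write_outputs(df):
    pandas2csv(df, "data/dataframes/example.csv", index=False)
    pandas2hdf(df, "data/dataframes/example.h5", key="data")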
def subsetlatlon(df, lat_range, lon_range):
"""Subsets df to rows where lat and lon fall into lat_range and lon_range
Args:
df: dataframe with columns 'lat' and 'lon'
        lat_range: range of latitude values, as a range or other iterable
        lon_range: range of longitude values, as a range or other iterable
Returns:
Subsetted dataframe
"""
return df.loc[df['lat'].isin(lat_range) & df['lon'].isin(lon_range)]
def createmaskdf(mask_file):
"""Loads netCDF4 mask file and creates an equivalent dataframe in tall/melted
format, with columns 'lat' and 'lon' and rows corresponding to (lat,lon)
combinations with mask value == 1
Args:
mask_file: name of netCDF4 mask file
Returns:
Dataframe with one row for each (lat,lon) pair with mask value == 1
"""
fh = netCDF4.Dataset(mask_file, 'r')
# fh = xr.open_dataset(mask_file)
lat = fh.variables['lat'][:]
lon = fh.variables['lon'][:] + 360
mask = fh.variables['mask'][:]
lon, lat = np.meshgrid(lon, lat)
# mask_df = pd.DataFrame({'lat': lat.flatten(),
# 'lon': lon.flatten(),
# 'mask': mask.flatten()})
mask_df = pd.DataFrame({'lat': lat.flatten(),
'lon': lon.flatten(),
'mask': mask.data.flatten()})
# Retain only those entries with a mask value of 1
mask_df = mask_df.loc[mask_df['mask'] == 1]
# Drop unnecessary 'mask' column
return mask_df.drop('mask', axis=1)
def get_contest_mask():
"""Returns forecast rodeo contest mask as a dataframe
Columns of dataframe are lat, lon, and mask, where mask is a {0,1} variable
indicating whether the grid point should be included (1) or excluded (0).
"""
return createmaskdf("data/masks/fcstrodeo_mask.nc")
def get_us_mask():
"""Returns contiguous U.S. mask as a dataframe
Columns of dataframe are lat, lon, and mask, where mask is a {0,1} variable
indicating whether the grid point should be included (1) or excluded (0).
"""
return createmaskdf("data/masks/us_mask.nc")
def subsetmask(df, mask_df=get_contest_mask()):
"""Subsets df to rows with lat,lon pairs included in both df and mask_df
Args:
df: dataframe with columns 'lat' and 'lon'
mask_df: dataframe created by createmaskdf
Returns:
Subsetted dataframe
"""
return pd.merge(df, mask_df, on=['lat', 'lon'], how='inner')
def get_measurement_variable(gt_id, shift=None):
"""Returns measurement variable name for the given ground truth id
Args:
gt_id: ground truth data string accepted by get_ground_truth
shift: (optional) Number of days by which ground truth measurements
should be shifted forward
"""
suffix = "" if shift is None or shift == 0 else "_shift"+str(shift)
valid_names = ["tmp2m", "tmin", "tmax", "precip", "sst", "icec",
"mei", "mjo", "sce", "sst_2010", "icec_2010"]
for name in valid_names:
if gt_id.endswith(name):
return name+suffix
# for wind or hgt variables, measurement variable name is the same as the
# gt id
if "hgt" in gt_id or "uwnd" in gt_id or "vwnd" in gt_id:
return gt_id+suffix
# for NCEP/NCAR reanalysis surface variables, remove contest_ prefix and
# take the first part of the variable name, before the first period
if gt_id in ["contest_slp", "contest_pr_wtr.eatm", "contest_rhum.sig995",
"contest_pres.sfc.gauss", "contest_pevpr.sfc.gauss"]:
return gt_id.replace("contest_", "").split(".")[0]+suffix
elif gt_id in ["us_slp", "us_pr_wtr.eatm", "us_rhum.sig995",
"us_pres.sfc.gauss", "us_pevpr.sfc.gauss"]:
return gt_id.replace("us_", "").split(".")[0]+suffix
raise ValueError("Unrecognized gt_id "+gt_id)
def get_forecast_variable(gt_id):
"""Returns forecast variable name for the given ground truth id
Args:
gt_id: ground truth data string ending in "precip" or "tmp2m"
"""
if gt_id.endswith("tmp2m"):
return "tmp2m"
if gt_id.endswith("precip"):
return "prate"
raise ValueError("Unrecognized gt_id "+gt_id)
def shift_df(df, shift=None, date_col='start_date', groupby_cols=['lat', 'lon'],
rename_cols=True):
"""Returns dataframe with all columns save for the date_col and groupby_cols
shifted forward by a specified number of days within each group
Args:
df: dataframe to shift
shift: (optional) Number of days by which ground truth measurements
should be shifted forward; date index will be extended upon shifting;
if shift is None or shift == 0, original df is returned, unmodified
date_col: (optional) name of datetime column
groupby_cols: (optional) if all groupby_cols exist, shifting performed
separately on each group; otherwise, shifting performed globally on
the dataframe
rename_cols: (optional) if True, rename columns to reflect shift
"""
if shift is not None and shift != 0:
# Get column names of all variables to be shifted
# If any of groupby_cols+[date_col] do not exist, ignore error
cols_to_shift = df.columns.drop(
groupby_cols+[date_col], errors='ignore')
# Function to shift data frame by shift and extend index
def shift_grp_df(grp_df): return grp_df[cols_to_shift].set_index(
grp_df[date_col]).shift(int(shift), freq="D")
if set(groupby_cols).issubset(df.columns):
# Shift ground truth measurements for each group
df = df.groupby(groupby_cols).apply(shift_grp_df).reset_index()
else:
# Shift ground truth measurements
df = shift_grp_df(df).reset_index()
if rename_cols:
# Rename variables to reflect shift
df.rename(columns=dict(
list(zip(cols_to_shift, [col+"_shift"+str(shift) for col in cols_to_shift]))),
inplace=True)
return df
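# Hedged sketch of shift_df in action: within each (lat, lon) group the measurement
# columns are re-indexed on start_date, pushed `shift` days forward, and renamed with a
# "_shiftN" suffix. The toy values below are made up for illustration.
def _example_shift_df():
    toy = pd.DataFrame({
        "lat": [30.0, 30.0],
        "lon": [260.0, 260.0],
        "start_date": pd.to_datetime(["2000-01-01", "2000-01-02"]),
        "tmp2m": [1.0, 2.0],
    })
    # Values recorded on Jan. 1-2 now appear under Jan. 15-16 as "tmp2m_shift14".
    return shift_df(toy, shift=14)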
def load_measurement(file_name, mask_df=None, shift=None):
"""Loads measurement data from a given file name and returns as a dataframe
Args:
file_name: name of HDF5 file from which measurement data will be loaded
mask_df: (optional) mask dataframe of the form returned by subsetmask();
if specified, returned dataframe will be restricted to those lat, lon
pairs indicated by the mask
shift: (optional) Number of days by which ground truth measurements
should be shifted forward; date index will be extended upon shifting
"""
# Load ground-truth data
df = pd.read_hdf(file_name, 'data')
# Convert to dataframe if necessary
if not isinstance(df, pd.DataFrame):
df = df.to_frame()
# Replace multiindex with start_date, lat, lon columns if necessary
if isinstance(df.index, pd.MultiIndex):
df.reset_index(inplace=True)
if mask_df is not None:
# Restrict output to requested lat, lon pairs
df = subsetmask(df, mask_df)
# Return dataframe with desired shift
return shift_df(df, shift=shift, date_col='start_date', groupby_cols=['lat', 'lon'])
def get_first_year(data_id):
"""Returns first year in which ground truth data or forecast data is available
Args:
data_id: forecast identifier beginning with "nmme" or ground truth identifier
accepted by get_ground_truth
"""
if data_id.startswith("global"):
return 2011
if data_id.endswith("precip"):
return 1948
if data_id.startswith("nmme"):
return 1982
if data_id.endswith("tmp2m") or data_id.endswith("tmin") or data_id.endswith("tmax"):
return 1979
if "sst" in data_id or "icec" in data_id:
return 1981
if data_id.endswith("mei"):
return 1979
if data_id.endswith("mjo"):
return 1974
if data_id.endswith("sce"):
return 1966
if "hgt" in data_id or "uwnd" in data_id or "vwnd" in data_id:
return 1948
if ("slp" in data_id or "pr_wtr" in data_id or "rhum" in data_id or
"pres" in data_id or "pevpr" in data_id):
return 1948
if data_id.startswith("subx_cfsv2"):
return 1999
raise ValueError("Unrecognized data_id "+data_id)
def get_last_year(data_id):
"""Returns last year in which ground truth data or forecast data is available
Args:
data_id: forecast identifier beginning with "nmme" or
ground truth identifier accepted by get_ground_truth
"""
return 2019
def get_ground_truth(gt_id, mask_df=None, shift=None):
"""Returns ground truth data as a dataframe
Args:
gt_id: string identifying which ground-truth data to return;
valid choices are "global_precip", "global_tmp2m", "us_precip",
"contest_precip", "contest_tmp2m", "contest_tmin", "contest_tmax",
"contest_sst", "contest_icec", "contest_sce",
"pca_tmp2m", "pca_precip", "pca_sst", "pca_icec", "mei", "mjo",
"pca_hgt_{}", "pca_uwnd_{}", "pca_vwnd_{}",
"pca_sst_2010", "pca_icec_2010", "pca_hgt_10_2010",
"contest_rhum.sig995", "contest_pres.sfc.gauss", "contest_pevpr.sfc.gauss",
"wide_contest_sst", "wide_hgt_{}", "wide_uwnd_{}", "wide_vwnd_{}",
"us_tmp2m", "us_tmin", "us_tmax", "us_sst", "us_icec", "us_sce",
"us_rhum.sig995", "us_pres.sfc.gauss", "us_pevpr.sfc.gauss"
mask_df: (optional) see load_measurement
shift: (optional) see load_measurement
"""
gt_file = os.path.join("data", "dataframes", "gt-"+gt_id+"-14d.h5")
printf(f"Loading {gt_file}")
if gt_id.endswith("mei"):
# MEI does not have an associated number of days
gt_file = gt_file.replace("-14d", "")
if gt_id.endswith("mjo"):
# MJO is not aggregated to a 14-day period
gt_file = gt_file.replace("14d", "1d")
return load_measurement(gt_file, mask_df, shift)
def get_ground_truth_unaggregated(gt_id, mask_df=None, shifts=None):
"""Returns daily ground-truth data as a dataframe, along with one column
per shift in shifts
"""
first_year = get_first_year(gt_id)
last_year = get_last_year(gt_id)
gt_file = os.path.join("data", "dataframes",
"gt-"+gt_id+"-1d-{}-{}.h5".format(
first_year, last_year))
gt = load_measurement(gt_file, mask_df)
if shifts is not None:
measurement_variable = get_measurement_variable(gt_id)
for shift in shifts:
# Shift ground truth measurements by shift for each lat lon and extend index
gt_shift = gt.groupby(['lat', 'lon']).apply(
lambda df: df[[measurement_variable]].set_index(df.start_date).shift(shift, freq="D")).reset_index()
# Rename variable to reflect shift
gt_shift.rename(columns={measurement_variable: measurement_variable +
"_shift"+str(shift)}, inplace=True)
# Merge into the main dataframe
gt = pd.merge(gt, gt_shift, on=[
"lat", "lon", "start_date"], how="outer")
return gt
def get_climatology(gt_id, mask_df=None):
"""Returns climatology data as a dataframe
Args:
gt_id: see load_measurement
mask_df: (optional) see load_measurement
"""
# Load global climatology if US climatology requested
climatology_file = os.path.join("data", "dataframes",
"official_climatology-"+gt_id+".h5")
return load_measurement(climatology_file, mask_df)
def get_ground_truth_anomalies(gt_id, mask_df=None, shift=None):
"""Returns ground truth data, climatology, and ground truth anomalies
as a dataframe
Args:
gt_id: see get_climatology
mask_df: (optional) see get_climatology
shift: (optional) see get_climatology
"""
date_col = "start_date"
# Get shifted ground truth column names
gt_col = get_measurement_variable(gt_id, shift=shift)
# Load unshifted ground truth data
tic()
gt = get_ground_truth(gt_id, mask_df=mask_df)
toc()
printf("Merging climatology and computing anomalies")
tic()
# Load associated climatology
climatology = get_climatology(gt_id, mask_df=mask_df)
if shift is not None and shift != 0:
# Rename unshifted gt columns to reflect shifted data name
cols_to_shift = gt.columns.drop(
['lat', 'lon', date_col], errors='ignore')
gt.rename(columns=dict(
list(zip(cols_to_shift, [col+"_shift"+str(shift) for col in cols_to_shift]))),
inplace=True)
unshifted_gt_col = get_measurement_variable(gt_id)
# Rename unshifted climatology column to reflect shifted data name
climatology.rename(columns={unshifted_gt_col: gt_col},
inplace=True)
# Merge climatology into dataset
gt = pd.merge(gt, climatology[[gt_col]],
left_on=['lat', 'lon', gt[date_col].dt.month,
gt[date_col].dt.day],
right_on=[climatology.lat, climatology.lon,
climatology[date_col].dt.month,
climatology[date_col].dt.day],
how='left', suffixes=('', '_clim')).drop(['key_2', 'key_3'], axis=1)
clim_col = gt_col+"_clim"
# Compute ground-truth anomalies
anom_col = gt_col+"_anom"
gt[anom_col] = gt[gt_col] - gt[clim_col]
toc()
printf("Shifting dataframe")
tic()
# Shift dataframe without renaming columns
gt = shift_df(gt, shift=shift, rename_cols=False)
toc()
return gt
def in_month_day_range(test_datetimes, target_datetime, margin_in_days=0):
"""For each test datetime object, returns whether month and day is
within margin_in_days days of target_datetime month and day. Measures
distance between dates ignoring leap days.
Args:
test_datetimes: pandas Series of datetime.datetime objects
target_datetime: target datetime.datetime object (must not be Feb. 29!)
margin_in_days: number of days allowed between target
month and day and test date month and day
"""
# Compute target day of year in a year that is not a leap year
non_leap_year = 2017
target_day_of_year = pd.Timestamp(target_datetime.
replace(year=non_leap_year)).dayofyear
# Compute difference between target and test days of year
# after adjusting leap year days of year to match non-leap year days of year;
# This has the effect of treating Feb. 29 as the same date as Feb. 28
leap_day_of_year = 60
day_delta = test_datetimes.dt.dayofyear
day_delta -= (test_datetimes.dt.is_leap_year &
(day_delta >= leap_day_of_year))
day_delta -= target_day_of_year
# Return true if test day within margin of target day when we account for year
# wraparound
return ((np.abs(day_delta) <= margin_in_days) |
((365 - margin_in_days) <= day_delta) |
(day_delta <= (margin_in_days - 365)))
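# Illustrative sketch (not part of the original module); assumes pandas (as pd) and
# the standard datetime module are imported at the top of the full source file:
#     dates = pd.Series(pd.to_datetime(["2016-02-28", "2016-02-29", "2016-03-05"]))
#     in_month_day_range(dates, datetime.datetime(2017, 2, 28), margin_in_days=1)
#     # -> [True, True, False]; the leap day matches because it is folded onto Feb. 28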
def month_day_subset(data, target_datetime, margin_in_days=0,
start_date_col="start_date"):
"""Returns subset of dataframe rows with start date month and day
within margin_in_days days of the target month and day. Measures
distance between dates ignoring leap days.
Args:
data: pandas dataframe with start date column containing datetime values
target_datetime: target datetime.datetime object providing target month
and day (will treat Feb. 29 like Feb. 28)
start_date_col: name of start date column
margin_in_days: number of days allowed between target
month and day and start date month and day
"""
if (target_datetime.day == 29) and (target_datetime.month == 2):
target_datetime = target_datetime.replace(day=28)
return data.loc[in_month_day_range(data[start_date_col], target_datetime,
margin_in_days)]
# return data.loc[(data[start_date_col].dt.month == target_datetime.month) &
# (data[start_date_col].dt.day == target_datetime.day)]
def load_forecast_from_file(file_name, mask_df=None):
"""Loads forecast data from file and returns as a dataframe
Args:
file_name: HDF5 file containing forecast data
mask_df: (optional) see load_measurement
"""
# Load forecast dataframe
forecast = | pd.read_hdf(file_name) | pandas.read_hdf |
# my_script.py
from pandas import DataFrame
# from my_mod import enlarge
from my_mod import enlarge # this works
print('Hello')
df = | DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
from operator import itemgetter
from abc import ABCMeta, abstractmethod
from flow_equation_parser import FlowEquationParser
class IntermediateVectorManager:
def __init__(self, couplings):
self.couplings = couplings
self.num_intermediate_vectors = 0
self.intermediate_vectors = np.array([])
self.global_iterator_map = dict()
for coupling_index, key in enumerate(self.couplings):
self.global_iterator_map[key] = "variables[" + str(coupling_index) + "]"
self.num_couplings = len(self.couplings)
def get_intermediate_vector(self):
# Generate new intermediate vector
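        # (all previously created intermediate vectors are still assigned to
        # non-coupling entries of global_iterator_map, so none can be reused)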
if len(self.intermediate_vectors) == len(np.array(list(self.global_iterator_map.values()))) - self.num_couplings:
self.intermediate_vectors = np.append(
self.intermediate_vectors, "inter_med_vec" + str(self.num_intermediate_vectors))
self.num_intermediate_vectors += 1
return self.intermediate_vectors[-1]
# Return existing intermediate vector
else:
used_intermediate_vectors = list(itemgetter(*self.get_actual_intermediate_vector_keys())(
self.global_iterator_map))
available_intermediate_vectors = [item for item in self.intermediate_vectors
if item not in used_intermediate_vectors]
return available_intermediate_vectors[-1]
def get_actual_intermediate_vector_keys(self):
keys = list(self.global_iterator_map.keys())
return [item for item in keys if item not in self.couplings]
def free_intermediate_vector(self, name):
self.global_iterator_map.pop(name)
class ThrustMetaProgrammer:
__metaclass__ = ABCMeta
comp_functor_counter = 0
def __init__(self, **kwargs):
super(ThrustMetaProgrammer, self).__init__()
self.theory_name = kwargs.pop("theory_name")
self.dim = kwargs.pop("dim")
self.base_struct_name = kwargs.pop("base_struct_name")
self.class_name = ''.join([item.capitalize() for item in self.theory_name.split(sep="_")])
self.intermediate_vector_manager = None
self.couplings = None
@abstractmethod
def write_header(self):
pass
@abstractmethod
def write_footer(self):
pass
def write_flow_equation(self, dim_index, flow_equation_parser):
constant_expressions = flow_equation_parser.operation_tree_dataframe.query("type == 'constant expression'").value
unique_constant_expressions = | pd.unique(constant_expressions) | pandas.unique |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 13:23:20 2022
@author: lawashburn
"""
import os
import csv
import pandas as pd
import numpy as np
from datetime import datetime
now = datetime.now()
spectra_import = r"C:\Users\lawashburn\Documents\HyPep1.0\HyPep_Simple_ASMS_Results\Raw_Files\Formatted_MS2\PO_3_untarget_ms2_output_list.csv"#path to spectra after RawConverter
ion_list_import = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\ion_list.csv"
precursor_list_import = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\precursor_list.csv"
working_directory = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\num_test"
final_dir =r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\Final_results"
data_type = 'PO_'
trial = '3_'
sample_name = 'PO 3'
error_marg = 10 #+/- ppm
h_mass = 1.00784
#spectra_import = input('Enter path to formatted spectra .txt file: ')
#ion_list_import = input('Enter path to ion fragment list .csv: ')
#precursor_list_import = input('Enter path to precursor mass .csv: ')
#working_directory = input('Enter path to working directory: ')
#final_dir = input('Enter path to output directory: ')
#data_type = input('Enter tissue type: ')
#trial = input('Enter trial number: ')
#sample_name = input('Enter sample name (e.g. TG2): ')
#error_marg = input('Enter ppm error cutoff: ')
print('loading files', datetime.now())
#formats spectra import values
spectra_import = | pd.read_csv(spectra_import, sep=",",skiprows=[0], names= ["m/z", "resolution", "charge", "intensity","MS2",'scan_number','empty']) | pandas.read_csv |
#!/usr/bin/env python3
import pytest
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import math
import torch
from neuralprophet import NeuralProphet, set_random_seed
from neuralprophet import df_utils
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "tests", "test-data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
NROWS = 256
EPOCHS = 2
BATCH_SIZE = 64
LR = 1.0
PLOT = False
def test_names():
log.info("testing: names")
m = NeuralProphet()
m._validate_column_name("hello_friend")
def test_train_eval_test():
log.info("testing: Train Eval Test")
m = NeuralProphet(
n_lags=10,
n_forecasts=3,
ar_sparsity=0.1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
df = m._handle_missing_data(df, freq="D", predicting=False)
df_train, df_test = m.split_df(df, freq="D", valid_p=0.1)
metrics = m.fit(df_train, freq="D", validation_df=df_test)
val_metrics = m.test(df_test)
log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_df_utils_func():
log.info("testing: df_utils Test")
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
# test find_time_threshold
df_dict, _ = df_utils.prep_copy_df_dict(df)
time_threshold = df_utils.find_time_threshold(df_dict, n_lags=2, valid_p=0.2, inputs_overbleed=True)
df_train, df_val = df_utils.split_considering_timestamp(
df_dict, n_lags=2, n_forecasts=2, inputs_overbleed=True, threshold_time_stamp=time_threshold
)
# init data params with a list
global_data_params = df_utils.init_data_params(df_dict, normalize="soft")
global_data_params = df_utils.init_data_params(df_dict, normalize="soft1")
global_data_params = df_utils.init_data_params(df_dict, normalize="standardize")
log.debug("Time Threshold: \n {}".format(time_threshold))
log.debug("Df_train: \n {}".format(type(df_train)))
log.debug("Df_val: \n {}".format(type(df_val)))
def test_trend():
log.info("testing: Trend")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
growth="linear",
n_changepoints=10,
changepoints_range=0.9,
trend_reg=1,
trend_reg_threshold=False,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_custom_changepoints():
log.info("testing: Custom Changepoints")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
dates = df["ds"][range(1, len(df) - 1, int(len(df) / 5.0))]
dates_list = [str(d) for d in dates]
dates_array = pd.to_datetime(dates_list).values
log.debug("dates: {}".format(dates))
log.debug("dates_list: {}".format(dates_list))
log.debug("dates_array: {} {}".format(dates_array.dtype, dates_array))
for cp in [dates_list, dates_array]:
m = NeuralProphet(
changepoints=cp,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_no_trend():
log.info("testing: No-Trend")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
growth="off",
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_seasons():
log.info("testing: Seasonality: additive")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="additive",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("SUM of yearly season params: {}".format(sum(abs(m.model.season_params["yearly"].data.numpy()))))
log.debug("SUM of weekly season params: {}".format(sum(abs(m.model.season_params["weekly"].data.numpy()))))
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
log.info("testing: Seasonality: multiplicative")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
# m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
def test_custom_seasons():
log.info("testing: Custom Seasonality")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
other_seasons = False
m = NeuralProphet(
yearly_seasonality=other_seasons,
weekly_seasonality=other_seasons,
daily_seasonality=other_seasons,
seasonality_mode="additive",
# seasonality_mode="multiplicative",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_seasonality(name="quarterly", period=90, fourier_order=5)
log.debug("seasonalities: {}".format(m.season_config.periods))
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar():
log.info("testing: AR")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=7,
yearly_seasonality=False,
epochs=EPOCHS,
# batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_sparse():
log.info("testing: AR (sparse")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=3,
n_lags=14,
ar_sparsity=0.5,
yearly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_deep():
log.info("testing: AR-Net (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg():
log.info("testing: Lagged Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=2,
n_lags=3,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
m = m.add_lagged_regressor(names="A")
m = m.add_lagged_regressor(names="B", only_last_value=True)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=10)
forecast = m.predict(future)
if PLOT:
print(forecast.to_string())
m.plot_last_forecast(forecast, include_previous_forecasts=5)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg_deep():
log.info("testing: List of Lagged Regressors (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=1,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(15, min_periods=1).mean()
df["C"] = df["y"].rolling(30, min_periods=1).mean()
cols = [col for col in df.columns if col not in ["ds", "y"]]
m = m.add_lagged_regressor(names=cols)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
forecast = m.predict(df)
if PLOT:
# print(forecast.to_string())
# m.plot_last_forecast(forecast, include_previous_forecasts=10)
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_events():
log.info("testing: Events")
df = pd.read_csv(PEYTON_FILE)[-NROWS:]
playoffs = pd.DataFrame(
{
"event": "playoff",
"ds": pd.to_datetime(
[
"2008-01-13",
"2009-01-03",
"2010-01-16",
"2010-01-24",
"2010-02-07",
"2011-01-08",
"2013-01-12",
"2014-01-12",
"2014-01-19",
"2014-02-02",
"2015-01-11",
"2016-01-17",
"2016-01-24",
"2016-02-07",
]
),
}
)
superbowls = pd.DataFrame(
{
"event": "superbowl",
"ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
}
)
events_df = pd.concat((playoffs, superbowls))
m = NeuralProphet(
n_lags=2,
n_forecasts=30,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# set event windows
m = m.add_events(
["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
)
# add the country specific holidays
m = m.add_country_holidays("US", mode="additive", regularization=0.5)
m.add_country_holidays("Indonesia")
m.add_country_holidays("Thailand")
m.add_country_holidays("Philippines")
m.add_country_holidays("Pakistan")
m.add_country_holidays("Belarus")
history_df = m.create_df_with_events(df, events_df)
metrics_df = m.fit(history_df, freq="D")
future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
forecast = m.predict(df=future)
log.debug("Event Parameters:: {}".format(m.model.event_params))
if PLOT:
m.plot_components(forecast)
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_future_reg():
log.info("testing: Future Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
regressors_df_future = pd.DataFrame(data={"A": df["A"][-50:], "B": df["B"][-50:]})
df = df[:-50]
m = m.add_future_regressor(name="A")
m = m.add_future_regressor(name="B", mode="multiplicative")
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_plot():
log.info("testing: Plotting")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=m.n_forecasts, n_historic_predictions=10)
forecast = m.predict(future)
m.plot(forecast)
m.plot_last_forecast(forecast, include_previous_forecasts=10)
m.plot_components(forecast)
m.plot_parameters()
m.highlight_nth_step_ahead_of_each_forecast(7)
forecast = m.predict(df)
m.plot(forecast)
m.plot_last_forecast(forecast, include_previous_forecasts=10)
m.plot_components(forecast)
m.plot_parameters()
if PLOT:
plt.show()
def test_air_data():
log.info("TEST air_passengers.csv")
df = pd.read_csv(AIR_FILE)
m = NeuralProphet(
n_changepoints=0,
yearly_seasonality=2,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(df, freq="MS")
future = m.make_future_dataframe(df, periods=48, n_historic_predictions=len(df) - m.n_lags)
forecast = m.predict(future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_random_seed():
log.info("TEST random seed")
df = pd.read_csv(PEYTON_FILE, nrows=512)
set_random_seed(0)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum1 = sum(forecast["yhat1"].values)
set_random_seed(0)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum2 = sum(forecast["yhat1"].values)
set_random_seed(1)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum3 = sum(forecast["yhat1"].values)
log.debug("should be same: {} and {}".format(checksum1, checksum2))
log.debug("should not be same: {} and {}".format(checksum1, checksum3))
assert math.isclose(checksum1, checksum2)
assert not math.isclose(checksum1, checksum3)
def test_yosemite():
log.info("TEST Yosemite Temps")
df = pd.read_csv(YOS_FILE, nrows=NROWS)
m = NeuralProphet(
changepoints_range=0.95,
n_changepoints=15,
weekly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(df, freq="5min")
future = m.make_future_dataframe(df, periods=12 * 24, n_historic_predictions=12 * 24)
forecast = m.predict(future)
if PLOT:
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_model_cv():
log.info("CV from model")
def check_simple(df):
m = NeuralProphet(
learning_rate=LR,
)
folds = m.crossvalidation_split_df(df, freq="D", k=5, fold_pct=0.1, fold_overlap_pct=0.5)
assert all([70 + i * 5 == len(train) for i, (train, val) in enumerate(folds)])
assert all([10 == len(val) for (train, val) in folds])
def check_cv(df, freq, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct):
m = NeuralProphet(
n_lags=n_lags,
n_forecasts=n_forecasts,
learning_rate=LR,
)
folds = m.crossvalidation_split_df(df, freq=freq, k=k, fold_pct=fold_pct, fold_overlap_pct=fold_overlap_pct)
total_samples = len(df) - m.n_lags + 2 - (2 * m.n_forecasts)
per_fold = int(fold_pct * total_samples)
not_overlap = per_fold - int(fold_overlap_pct * per_fold)
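        # e.g. for the first call below: len(df)=100, n_lags=10, n_forecasts=5,
        # fold_pct=0.1 -> total_samples = 100 - 10 + 2 - 10 = 82, per_fold = 8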
assert all([per_fold == len(val) - m.n_lags + 1 - m.n_forecasts for (train, val) in folds])
assert all(
[
total_samples - per_fold - (k - i - 1) * not_overlap == len(train) - m.n_lags + 1 - m.n_forecasts
for i, (train, val) in enumerate(folds)
]
)
check_simple(pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}))
check_cv(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}),
n_lags=10,
n_forecasts=5,
freq="D",
k=5,
fold_pct=0.1,
fold_overlap_pct=0,
)
check_cv(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}),
n_lags=10,
n_forecasts=15,
freq="D",
k=5,
fold_pct=0.1,
fold_overlap_pct=0.5,
)
def test_loss_func():
log.info("TEST setting torch.nn loss func")
df = | pd.read_csv(PEYTON_FILE, nrows=512) | pandas.read_csv |
import pandas as pd
from app import db
from app.fetcher.fetcher import Fetcher
from app.models import Umrti
class DeathsFetcher(Fetcher):
"""
Class for updating deaths table.
"""
DEATHS_CSV = 'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/umrti.csv'
def __init__(self):
super().__init__(Umrti.__tablename__, self.DEATHS_CSV, check_date=False)
def fetch(self, import_id: int) -> None:
df = pd.read_csv(self._url)
vekova_skupina = pd.read_sql_query('select vekova_skupina, min_vek, max_vek from populace_kategorie', db.engine)
vekova_skupina['join'] = 0
vek = pd.Series(range(0, 151), name='vek').to_frame()
vek['join'] = 0
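        # the constant 'join' column turns the merge below into a cross join of every
        # age 0-150 with every age group; the where/dropna then keeps the matching group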
merged = pd.merge(vek, vekova_skupina)
merged = merged.where((merged['vek'] >= merged['min_vek']) & (merged['vek'] <= merged['max_vek'])).dropna()
df = | pd.merge(df, merged, how='left') | pandas.merge |
import pandas as pd
import country_converter as coco
cc = coco.CountryConverter()
def convert_country(country):
return cc.convert(names=[country], to="ISO3")
# read data
happiness_df = pd.read_excel("data/raw/Chapter2OnlineData.xlsx",
sheet_name="Figure2.6")
happiness_names = list(happiness_df["Country"])
happiness_codes = [convert_country(n) for n in happiness_names]
happiness_df["ISO3"] = happiness_codes
competitiveness_df = pd.read_csv("data/raw/competitiveness.csv",
encoding="utf-8",
sep="\t")
competitiveness_names = list(competitiveness_df["Country / Economy"])
competitiveness_codes = [convert_country(n) for n in competitiveness_names]
competitiveness_df["ISO3"] = competitiveness_codes
freedom_df = pd.read_csv("data/raw/fiw.csv",
encoding="utf-8",
sep="\t")
freedom_names = list(freedom_df["Country or Territory"])
freedom_codes = [convert_country(n) for n in freedom_names]
freedom_df["ISO3"] = freedom_codes
gdp_df = pd.read_csv("data/raw/gdp_ppp.csv",
encoding="utf-8",
sep="\t")
gdp_names = list(gdp_df["Country"])
gdp_codes = [convert_country(n) for n in gdp_names]
gdp_df["ISO3"] = gdp_codes
business_df = pd.read_excel("data/raw/Rankings.xlsx",
sheet_name="Sheet1")
business_names = business_df["Economy"]
business_codes = [convert_country(n) for n in business_names]
business_df["ISO3"] = business_codes
law_df = pd.read_csv("data/raw/rol.csv",
encoding="utf-8",
sep="\t")
law_names = list(law_df["Country"])
law_codes = [convert_country(n) for n in law_names]
law_df["ISO3"] = law_codes
science_df = pd.read_csv("data/raw/scimagojr.csv",
encoding="utf-8",
sep="\t")
science_names = list(science_df["Country"])
science_codes = [convert_country(n) for n in science_names]
science_df["ISO3"] = science_codes
geo_df = pd.read_csv("data/raw/country-capitals.csv",
encoding="utf-8",
sep=",")
geo_names = list(geo_df["CountryName"])
geo_codes = [convert_country(n) for n in geo_names]
geo_df["ISO3"] = geo_codes
hdi_df = pd.read_csv("data/raw/Human Development Index (HDI).csv",
encoding="utf-8",
sep=",",
na_values=["n.a", "NaN"])
hdi_df = hdi_df.dropna()
hdi_names = list(hdi_df["Country"])
hdi_codes = [convert_country(n) for n in hdi_names]
hdi_df["ISO3"] = hdi_codes
super_df = pd.merge(happiness_df, competitiveness_df, left_on="ISO3", right_on="ISO3")
super_df = pd.merge(super_df, freedom_df, left_on="ISO3", right_on="ISO3")
super_df = pd.merge(super_df, gdp_df, left_on="ISO3", right_on="ISO3")
super_df = pd.merge(super_df, business_df, left_on="ISO3", right_on="ISO3")
super_df = pd.merge(super_df, law_df, left_on="ISO3", right_on="ISO3")
super_df = | pd.merge(super_df, science_df, left_on="ISO3", right_on="ISO3") | pandas.merge |
from __future__ import print_function, division, absolute_import
try:
import typing
except ImportError:
import collections as typing
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import colors
from matplotlib import patches
from matplotlib.tight_layout import get_renderer
def _aggregate_data(df, subset_size, sum_over):
"""
Returns
-------
df : DataFrame
full data frame
aggregated : Series
aggregates
"""
_SUBSET_SIZE_VALUES = ['auto', 'count', 'sum']
if subset_size not in _SUBSET_SIZE_VALUES:
raise ValueError('subset_size should be one of %s. Got %r'
% (_SUBSET_SIZE_VALUES, subset_size))
if df.ndim == 1:
# Series
input_name = df.name
df = pd.DataFrame({'_value': df})
if subset_size == 'auto' and not df.index.is_unique:
raise ValueError('subset_size="auto" cannot be used for a '
'Series with non-unique groups.')
if sum_over is not None:
raise ValueError('sum_over is not applicable when the input is a '
'Series')
if subset_size == 'count':
sum_over = False
else:
sum_over = '_value'
else:
# DataFrame
if sum_over is False:
raise ValueError('Unsupported value for sum_over: False')
elif subset_size == 'auto' and sum_over is None:
sum_over = False
elif subset_size == 'count':
if sum_over is not None:
raise ValueError('sum_over cannot be set if subset_size=%r' %
subset_size)
sum_over = False
elif subset_size == 'sum':
if sum_over is None:
raise ValueError('sum_over should be a field name if '
'subset_size="sum" and a DataFrame is '
'provided.')
gb = df.groupby(level=list(range(df.index.nlevels)), sort=False)
if sum_over is False:
aggregated = gb.size()
aggregated.name = 'size'
elif hasattr(sum_over, 'lower'):
aggregated = gb[sum_over].sum()
else:
raise ValueError('Unsupported value for sum_over: %r' % sum_over)
if aggregated.name == '_value':
aggregated.name = input_name
return df, aggregated
def _check_index(df):
# check all indices are boolean
if not all(set([True, False]) >= set(level)
for level in df.index.levels):
raise ValueError('The DataFrame has values in its index that are not '
'boolean')
df = df.copy(deep=False)
# XXX: this may break if input is not MultiIndex
kw = {'levels': [x.astype(bool) for x in df.index.levels],
'names': df.index.names,
}
if hasattr(df.index, 'codes'):
# compat for pandas <= 0.20
kw['codes'] = df.index.codes
else:
kw['labels'] = df.index.labels
df.index = pd.MultiIndex(**kw)
return df
def _scalar_to_list(val):
if not isinstance(val, (typing.Sequence, set)) or isinstance(val, str):
val = [val]
return val
def _get_subset_mask(agg, min_subset_size, max_subset_size,
min_degree, max_degree,
present, absent):
"""Get a mask over subsets based on size, degree or category presence"""
subset_mask = True
if min_subset_size is not None:
subset_mask = np.logical_and(subset_mask, agg >= min_subset_size)
if max_subset_size is not None:
subset_mask = np.logical_and(subset_mask, agg <= max_subset_size)
if (min_degree is not None and min_degree >= 0) or max_degree is not None:
degree = agg.index.to_frame().sum(axis=1)
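        # degree = number of categories marked True in each subset's index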
if min_degree is not None:
subset_mask = np.logical_and(subset_mask, degree >= min_degree)
if max_degree is not None:
subset_mask = np.logical_and(subset_mask, degree <= max_degree)
if present is not None:
for col in _scalar_to_list(present):
subset_mask = np.logical_and(
subset_mask,
agg.index.get_level_values(col).values)
if absent is not None:
for col in _scalar_to_list(absent):
exclude_mask = np.logical_not(
agg.index.get_level_values(col).values)
subset_mask = np.logical_and(subset_mask, exclude_mask)
return subset_mask
def _filter_subsets(df, agg,
min_subset_size, max_subset_size,
min_degree, max_degree):
subset_mask = _get_subset_mask(agg,
min_subset_size=min_subset_size,
max_subset_size=max_subset_size,
min_degree=min_degree,
max_degree=max_degree,
present=None, absent=None)
if subset_mask is True:
return df, agg
agg = agg[subset_mask]
df = df[df.index.isin(agg.index)]
return df, agg
def _process_data(df, sort_by, sort_categories_by, subset_size,
sum_over, min_subset_size=None, max_subset_size=None,
min_degree=None, max_degree=None, reverse=False):
df, agg = _aggregate_data(df, subset_size, sum_over)
total = agg.sum()
df = _check_index(df)
totals = [agg[agg.index.get_level_values(name).values.astype(bool)].sum()
for name in agg.index.names]
totals = pd.Series(totals, index=agg.index.names)
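    # totals[c] = aggregate over every subset in which category c is present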
# filter subsets:
df, agg = _filter_subsets(df, agg,
min_subset_size, max_subset_size,
min_degree, max_degree)
# sort:
if sort_categories_by == 'cardinality':
totals.sort_values(ascending=False, inplace=True)
elif sort_categories_by is not None:
raise ValueError('Unknown sort_categories_by: %r' % sort_categories_by)
df = df.reorder_levels(totals.index.values)
agg = agg.reorder_levels(totals.index.values)
if sort_by == 'cardinality':
agg = agg.sort_values(ascending=False)
elif sort_by == 'degree':
index_tuples = sorted(agg.index,
key=lambda x: (sum(x),) + tuple(reversed(x)))
agg = agg.reindex(pd.MultiIndex.from_tuples(index_tuples,
names=agg.index.names))
elif sort_by is None:
pass
else:
raise ValueError('Unknown sort_by: %r' % sort_by)
# add '_bin' to df indicating index in agg
# XXX: ugly!
def _pack_binary(X):
X = pd.DataFrame(X)
out = 0
for i, (_, col) in enumerate(X.items()):
out *= 2
out += col
return out
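    # e.g. an index row (True, False, True) packs to binary 101 -> 5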
df_packed = _pack_binary(df.index.to_frame())
data_packed = _pack_binary(agg.index.to_frame())
df['_bin'] = | pd.Series(df_packed) | pandas.Series |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from sklearn.pipeline import Pipeline
from hcrystalball.feature_extraction import HolidayTransformer
@pytest.mark.parametrize(
"X_y_with_freq, country_code, country_code_column, country_code_column_value, extected_error",
[
("series_with_freq_D", "DE", None, None, None),
("series_with_freq_D", None, "holiday_col", "DE", None),
("series_with_freq_M", "DE", None, None, ValueError), # not daily freq
("series_with_freq_Q", "DE", None, None, ValueError), # not daily freq
("series_with_freq_Y", "DE", None, None, ValueError), # not daily freq
(
"series_with_freq_D",
None,
"holiday_colsssss",
"DE",
KeyError,
), # there needs to be holiday_col in X
(
"series_with_freq_D",
None,
None,
None,
ValueError,
), # needs to have country_code or country_code_column
(
"series_with_freq_D",
"LALA",
"LALA",
None,
ValueError,
), # cannot have country_code and country_code_column in the same time
(
"series_with_freq_D",
"LALA",
None,
None,
ValueError,
), # country_code needs to be proper country
(
"series_with_freq_D",
None,
"holiday_col",
"Lala",
ValueError,
), # country_code needs to be proper country
],
indirect=["X_y_with_freq"],
)
def test_holiday_transformer_inputs(
X_y_with_freq,
country_code,
country_code_column,
country_code_column_value,
    expected_error,
):
X, _ = X_y_with_freq
    if expected_error is not None:
        with pytest.raises(expected_error):
holiday_transformer = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
)
if country_code_column:
X["holiday_col"] = country_code_column_value
holiday_transformer.fit_transform(X)
else:
holiday_transformer = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
)
if country_code_column:
X[country_code_column] = country_code_column_value
holiday_transformer.fit_transform(X)
if country_code_column:
assert holiday_transformer.get_params()["country_code"] is None
@pytest.mark.parametrize(
"country_code, country_code_column, country_code_column_value, exp_col_name",
[
("CZ", None, None, "_holiday_CZ"),
(None, "holiday_col", "CZ", "_holiday_holiday_col"),
],
)
def test_holiday_transformer_transform(
country_code, country_code_column, country_code_column_value, exp_col_name
):
expected = {exp_col_name: ["Labour Day", "", "", "", "", "", "", "Liberation Day", "", ""]}
X = pd.DataFrame(index=pd.date_range(start="2019-05-01", periods=10))
df_expected = pd.DataFrame(expected, index=X.index)
if country_code_column:
X[country_code_column] = country_code_column_value
df_result = HolidayTransformer(
country_code=country_code, country_code_column=country_code_column
).fit_transform(X)
assert_frame_equal(df_result, df_expected)
@pytest.mark.parametrize(
"country_code_first, country_code_column_first, country_code_column_first_value, "
"country_code_second, country_code_column_second, country_code_column_second_value",
[
("CZ", None, None, "SK", None, None),
(None, "czech", "CZ", None, "slovak", "SK"),
("CZ", None, None, None, "slovak", "SK"),
(None, "czech", "CZ", "SK", None, None),
],
)
def test_two_transformers(
country_code_first,
country_code_column_first,
country_code_column_first_value,
country_code_second,
country_code_column_second,
country_code_column_second_value,
):
first_suffix = country_code_first or country_code_column_first
second_suffix = country_code_second or country_code_column_second
expected = {
f"_holiday_{first_suffix}": [
"Labour Day",
"",
"",
"",
"",
"",
"",
"Liberation Day",
"",
"",
],
f"_holiday_{second_suffix}": [
"Labour Day",
"",
"",
"",
"",
"",
"",
"Liberation Day",
"",
"",
],
}
X = pd.DataFrame(index=pd.date_range(start="2019-05-01", periods=10))
df_expected = pd.DataFrame(expected, index=X.index)
if country_code_column_first:
X[country_code_column_first] = country_code_column_first_value
if country_code_column_second:
X[country_code_column_second] = country_code_column_second_value
pipeline = Pipeline(
[
(
f"holidays_{first_suffix}",
HolidayTransformer(
country_code_column=country_code_column_first,
country_code=country_code_first,
),
),
(
f"holidays_{second_suffix}",
HolidayTransformer(
country_code_column=country_code_column_second,
country_code=country_code_second,
),
),
]
)
df_result = pipeline.fit_transform(X)
assert_frame_equal(df_result, df_expected)
@pytest.fixture()
def expected_result_holidays_related_features(request):
if "without_related_features" in request.param:
result = {
"_holiday_DE": [
"Good Friday",
"",
"",
"Easter Monday",
"",
"",
"",
"",
"",
"",
]
}
elif "all_related_features" in request.param:
result = {
"_holiday_DE": [
"Good Friday",
"",
"",
"Easter Monday",
"",
"",
"",
"",
"",
"",
],
"_2_before_holiday_DE": [
False,
True,
True,
False,
False,
False,
False,
False,
False,
False,
],
"_2_after_holiday_DE": [
False,
True,
True,
False,
True,
True,
False,
False,
False,
False,
],
"_bridge_holiday_DE": [
False,
True,
True,
False,
False,
False,
False,
False,
False,
False,
],
}
elif "features_without_bridge_days" in request.param:
result = {
"_holiday_DE": [
"Good Friday",
"",
"",
"Easter Monday",
"",
"",
"",
"",
"",
"",
],
"_1_before_holiday_DE": [
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
],
"_1_after_holiday_DE": [
False,
True,
False,
False,
True,
False,
False,
False,
False,
False,
],
}
elif "just_before_holidays_1" in request.param:
result = {
"_holiday_DE": [
"Good Friday",
"",
"",
"Easter Monday",
"",
"",
"",
"",
"",
"",
],
"_1_before_holiday_DE": [
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
],
}
elif "bridge_days_work_just_with_after_and_before_days" in request.param:
result = {
"_holiday_DE": [
"Good Friday",
"",
"",
"Easter Monday",
"",
"",
"",
"",
"",
"",
],
"_1_after_holiday_DE": [
False,
True,
False,
False,
True,
False,
False,
False,
False,
False,
],
}
return pd.DataFrame(result, index=pd.date_range(start="2020-04-10", periods=10))
@pytest.mark.parametrize(
"""country_code,
days_before,
days_after,
bridge_days,
expected_result_holidays_related_features,
extected_error""",
[
("DE", 0, 0, False, "without_related_features", None),
("DE", 2, 2, True, "all_related_features", None),
("DE", 1, 1, False, "features_without_bridge_days", None),
("DE", 1, 0, False, "just_before_holidays_1", None),
(
"DE",
0,
1,
True,
"bridge_days_work_just_with_after_and_before_days",
ValueError,
),
],
indirect=["expected_result_holidays_related_features"],
)
def test_holidays_related_features(
country_code,
days_before,
days_after,
bridge_days,
expected_result_holidays_related_features,
    expected_error,
):
X = pd.DataFrame(index= | pd.date_range(start="2020-04-10", periods=10) | pandas.date_range |
# SLA Predictor application
# CLASS Project: https://class-project.eu/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Created on 25 Mar 2021
# @author: <NAME> - ATOS
#
from flask import Flask, request, render_template, jsonify
from prometheus_api_client import PrometheusConnect
from prometheus_api_client.utils import parse_datetime
from datetime import timedelta
import pandas as pd
import sklearn
import pickle as pk
prom = PrometheusConnect(url ="http://192.168.7.42:9091/", disable_ssl=True)
metrics_names = ['go_goroutines','go_memstats_alloc_bytes','go_memstats_gc_cpu_fraction',
'go_memstats_gc_sys_bytes', 'go_memstats_heap_alloc_bytes',
'go_memstats_heap_idle_bytes', 'go_memstats_heap_inuse_bytes',
'go_memstats_heap_objects', 'go_memstats_heap_released_bytes',
'go_memstats_heap_sys_bytes', 'go_memstats_last_gc_time_seconds',
'go_memstats_mspan_inuse_bytes', 'go_memstats_next_gc_bytes',
'go_memstats_other_sys_bytes','go_memstats_stack_inuse_bytes',
'go_memstats_stack_sys_bytes', 'go_threads', 'node_boot_time_seconds',
'node_entropy_available_bits', 'node_filefd_allocated' ,'node_load1',
'node_load15', 'node_load5', 'node_memory_Active_anon_bytes',
'node_memory_Active_bytes', 'node_memory_Active_file_bytes',
'node_memory_AnonHugePages_bytes', 'node_memory_AnonPages_bytes',
'node_memory_Buffers_bytes', 'node_memory_Cached_bytes',
'node_memory_Committed_AS_bytes', 'node_memory_DirectMap2M_bytes',
'node_memory_DirectMap4k_bytes', 'node_memory_Dirty_bytes',
'node_memory_Inactive_anon_bytes', 'node_memory_Inactive_bytes',
'node_memory_Inactive_file_bytes', 'node_memory_KernelStack_bytes',
'node_memory_Mapped_bytes', 'node_memory_MemAvailable_bytes',
'node_memory_MemFree_bytes', 'node_memory_PageTables_bytes',
'node_memory_SReclaimable_bytes', 'node_memory_SUnreclaim_bytes',
'node_memory_Shmem_bytes', 'node_memory_Slab_bytes', 'node_procs_running', 'node_sockstat_TCP_alloc',
'node_sockstat_TCP_mem', 'node_sockstat_TCP_mem_bytes',
'node_sockstat_sockets_used', 'node_time_seconds',
'node_timex_frequency_adjustment_ratio', 'node_timex_maxerror_seconds',
'node_timex_offset_seconds', 'process_resident_memory_bytes',
'process_start_time_seconds']
def get_timeseries_from_metric(metric_name,start_time,end_time,chunk_size):
metric_data = prom.get_metric_range_data(
metric_name, # this is the metric name and label config
start_time=start_time,
end_time=end_time,
chunk_size=chunk_size,
)
# do some process to it: merging all timeseries values to one, and get the aggregated value
metric_d_all_df = pd.DataFrame()
if metric_data:
for i in range(0,len(metric_data)):
metric_d_df = pd.DataFrame(metric_data[i]["values"],columns=["timestamp", metric_name+str(i)])
metric_d_df['timestamp']= pd.to_datetime(metric_d_df['timestamp'], unit='s')
metric_d_df[metric_name+str(i)]= pd.to_numeric(metric_d_df[metric_name+str(i)], errors='coerce')
metric_d_df.set_index('timestamp', inplace=True)
metric_d_all_df = pd.concat([metric_d_all_df, metric_d_df], axis=0)
#metric_d_all_df = metric_d_all_df.groupby(pd.Grouper(freq='1Min')).aggregate("last")
metric_d_agg_df = metric_d_all_df
metric_d_agg_df[metric_name] = metric_d_all_df.aggregate("mean", axis=1)
#return metric_d_agg_df[metric_name]
metric_data_insert = []
metric_data_insert_time = metric_d_agg_df.index.values
metric_data_insert_val = metric_d_agg_df[metric_name].values
for i in range(0,len(metric_data_insert_time)):
metric_data_insert.append([metric_data_insert_time[i],metric_data_insert_val[i]])
metric_data_df = pd.DataFrame(metric_data_insert,columns=["timestamp", metric_name])
metric_data_df['timestamp']= pd.to_datetime(metric_data_df['timestamp'], unit='s')
metric_data_df[metric_name]= pd.to_numeric(metric_data_df[metric_name], errors='coerce')
metric_data_df.set_index('timestamp', inplace=True)
return metric_data_df
else:
return | pd.DataFrame() | pandas.DataFrame |
from os import link
import flask
from flask.globals import request
from flask import Flask, render_template
# library used for prediction
import numpy as np
import pandas as pd
import pickle
# library used for insights
import json
import plotly
import plotly.express as px
app = Flask(__name__, template_folder = 'templates')
link_active = None
# render home template
@app.route('/')
def main():
return(render_template('home.html', title = 'Home'))
# load pickle file
model = pickle.load(open('model/rf_classifier.pkl', 'rb'))
scaler = pickle.load(open('model/scaler.pkl', 'rb'))
@app.route('/form')
def form():
show_prediction = False
link_active = 'Form'
return(render_template('form.html', title = 'Form', show_prediction = show_prediction, link_active = link_active))
@app.route('/insights')
def insights():
link_active = 'Insights'
df = pd.read_csv('online_shoppers_intention.csv')
df['Revenue'] = np.where(df['Revenue'] == True, 'Yes', 'No')
df.rename(columns={'Revenue':'Intention to Buy'}, inplace = True)
color_map = {'Yes': '#FFBF00', 'No': '#36454F'}
df_sorted = df.sort_values('Intention to Buy', ascending = True)
fig1 = px.scatter(
df_sorted, x = 'BounceRates', y='ExitRates',
color='Intention to Buy', color_discrete_map=color_map,
labels = {
"BounceRates": "Bounce Rates", "ExitRates" : "Exit Rates"
}
)
fig1.update_layout(legend_traceorder='reversed')
graph1JSON = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)
fig2 = px.box(
df, x = 'Intention to Buy', y='PageValues', color='Intention to Buy',
color_discrete_map=color_map,
labels = {
"PageValues" : "Page Values"
}
)
fig2.update_layout(legend_traceorder='reversed')
graph2JSON = json.dumps(fig2, cls=plotly.utils.PlotlyJSONEncoder)
dist_vt = df.groupby(['VisitorType', "Intention to Buy"]).count()[["Administrative"]]
cat_group = df.groupby(['VisitorType']).count()[["Administrative"]]
dist_vt["percentage"] = dist_vt.div(cat_group, level = 'VisitorType') * 100
dist_vt.reset_index(inplace = True)
dist_vt.columns = ['VisitorType', "Intention to Buy", "count", "percentage"]
dist_vt = dist_vt.sort_values(['VisitorType', 'Intention to Buy'], ascending=True)
dist_vt['VisitorType'] = np.where(
dist_vt['VisitorType'] == 'Returning_Visitor', 'Returning Visitor',
np.where(dist_vt['VisitorType'] == 'New_Visitor', 'New Visitor', 'Other')
)
fig3 = px.bar(
dist_vt, x = 'VisitorType', y = 'count', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map,
labels = {
"VisitorType" : "Visitor Type"
}
)
fig3.update_layout(showlegend=False)
graph3JSON = json.dumps(fig3, cls=plotly.utils.PlotlyJSONEncoder)
fig4 = px.bar(
dist_vt, x = 'VisitorType', y = 'percentage', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map, range_y = [0, 100],
labels = {
"VisitorType" : "Visitor Type"
}
)
fig4.update_layout(showlegend=False)
graph4JSON = json.dumps(fig4, cls=plotly.utils.PlotlyJSONEncoder)
df['Weekend'] = np.where(df['Weekend'] == True, 'Yes', 'No')
dist_weekend = df.groupby(['Intention to Buy', "Weekend"]).count()[["Administrative"]]
cat_group2 = df.groupby(['Weekend']).count()[["Administrative"]]
dist_weekend["percentage"] = dist_weekend.div(cat_group2, level = 'Weekend') * 100
dist_weekend.reset_index(inplace = True)
dist_weekend.columns = ["Intention to Buy", 'Weekend', "count", "percentage"]
fig5 = px.bar(
dist_weekend, x = 'Weekend', y = 'percentage', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map, range_y = [0, 100],
)
fig5.update_layout(showlegend=False)
graph5JSON = json.dumps(fig5, cls=plotly.utils.PlotlyJSONEncoder)
dist_vt_weekend = df[df['VisitorType'] == 'New_Visitor'].groupby(['Intention to Buy', "Weekend"]).count()[["Administrative"]]
cat_group3 = df[df['VisitorType'] == 'New_Visitor'].groupby(['Weekend']).count()[["Administrative"]]
dist_vt_weekend["percentage"] = dist_vt_weekend.div(cat_group3, level = 'Weekend') * 100
dist_vt_weekend.reset_index(inplace = True)
dist_vt_weekend.columns = ["Intention to Buy", 'Weekend', "count", "percentage"]
fig6 = px.bar(
dist_vt_weekend, x = 'Weekend', y = 'percentage', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map, range_y = [0, 100],
)
fig6.update_layout(showlegend=False)
graph6JSON = json.dumps(fig6, cls=plotly.utils.PlotlyJSONEncoder)
return(render_template('insights.html', title = 'Insights', link_active = link_active, graph1JSON = graph1JSON, graph2JSON = graph2JSON, graph3JSON = graph3JSON, graph4JSON = graph4JSON, graph5JSON = graph5JSON, graph6JSON = graph6JSON))
@app.route('/predict', methods=['POST'])
def predict():
'''
For rendering prediction result.
'''
link_active = 'Result'
show_prediction = True
# retrieve data
Administrative = int(request.form.get('Administrative'))
Administrative_Duration = float(request.form.get('Administrative_Duration'))
ProductRelated = int(request.form.get('ProductRelated'))
ProductRelated_Duration = float(request.form.get('ProductRelated_Duration'))
BounceRates = float(request.form.get('BounceRates'))
ExitRates = float(request.form.get('ExitRates'))
PageValues = float(request.form.get('PageValues'))
Month = int(request.form.get('Month'))
SpecialDay = request.form.get('SpecialDay')
Weekend = request.form.get('Weekend')
VisitorType = request.form.get('VisitorType')
TrafficType = request.form.get('TrafficType')
OperatingSystems = request.form.get('OperatingSystems')
Browser = request.form.get('Browser')
Region = request.form.get('Region')
# transform to log
Administrative = np.log1p(Administrative)
Administrative_Duration = np.log1p(Administrative_Duration)
ProductRelated = np.log1p(ProductRelated)
ProductRelated_Duration = np.log1p(ProductRelated_Duration)
BounceRates = np.log1p(BounceRates)
ExitRates = np.log1p(ExitRates)
PageValues = np.log1p(PageValues)
# set previously known values for one-hot encoding
known_SpecialDay = [0, 1]
known_OperatingSystems = [1, 2, 3, 'other']
known_Browser = [1, 2, 'other']
known_Region = [1, 2, 3, 4, 5, 6, 7, 8, 9]
known_VisitorType = ['New_Visitor', 'Other', 'Returning_Visitor']
known_Weekend = [False, True]
# encode the categorical value
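    # pd.Categorical with an explicit category list keeps the dummy columns identical
    # to those seen at training time, even though only a single row is encoded here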
SpecialDay_type = pd.Series([SpecialDay])
SpecialDay_type = pd.Categorical(SpecialDay_type, categories = known_SpecialDay)
SpecialDay_input = pd.get_dummies(SpecialDay_type, prefix = 'SpecialDay', drop_first=True)
OperatingSystems_type = pd.Series([OperatingSystems])
OperatingSystems_type = pd.Categorical(OperatingSystems_type, categories = known_OperatingSystems)
OperatingSystems_input = pd.get_dummies(OperatingSystems_type, prefix = 'OperatingSystems', drop_first=True)
Browser_type = pd.Series([Browser])
Browser_type = pd.Categorical(Browser_type, categories = known_Browser)
Browser_input = pd.get_dummies(Browser_type, prefix = 'Browser', drop_first=True)
Region_type = pd.Series([Region])
Region_type = pd.Categorical(Region_type, categories = known_Region)
Region_input = pd.get_dummies(Region_type, prefix = 'Region', drop_first=True)
VisitorType_type = pd.Series([VisitorType])
VisitorType_type = pd.Categorical(VisitorType_type, categories = known_VisitorType)
VisitorType_input = pd.get_dummies(VisitorType_type, prefix = 'VisitorType', drop_first=True)
Weekend_type = | pd.Series([Weekend]) | pandas.Series |
import time
import datetime
import numpy as np
import pandas as pd
import lightgbm as lgb
from dateutil.parser import parse
from sklearn.cross_validation import KFold
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv('../raw_data/d_train.csv',encoding="gbk")
test = pd.read_csv("../raw_data/d_test_B_20180128.csv",encoding="gbk")
train.drop(train[train["年龄"] >= 86].index,inplace=True)
fea_train = pd.read_csv("../raw_data/fea_train.csv")
fea_test = pd.read_csv("../raw_data/fea_test_B.csv")
fea_train1 = pd.read_csv("../raw_data/fea_train_1.csv")
fea_test1 = pd.read_csv("../raw_data/fea_test_B_1.csv")
fea_train2 = pd.read_csv("../raw_data/fea_train_2.csv")
fea_train3 = pd.read_csv("../raw_data/fea_train_3.csv")
fea_test2 = pd.read_csv("../raw_data/fea_test_2.csv")
fea_test3 = | pd.read_csv("../raw_data/fea_test_3.csv") | pandas.read_csv |
#!/usr/bin/env python3
'''compute the running exhaust emissions using perDistance rates'''
import sys
import pandas as pd
from smart_open import open
import geopandas as gpd
from joblib import Parallel,delayed
import yaml
from argparse import ArgumentParser
def groupRates(rates,vmx,srcTypeGroup,countyID,
timeIntervalID,roadTypeID,avgSpeedBin):
# filter the rates
rateSubset = rates[
(rates.sourceTypeID.isin(srcTypeGroup)) &
(rates.countyID == countyID) &
(rates.roadTypeID == roadTypeID) &
(rates.timeIntervalID == timeIntervalID) &
(rates.avgSpeedBinID == avgSpeedBin)
]
# filter the vmx
vmxSubset = vmx[
(vmx.sourceTypeID.isin(srcTypeGroup)) &
(vmx.timeIntervalID == timeIntervalID) &
(vmx.roadTypeID == roadTypeID) &
(vmx.countyID == countyID)
]
# merge
rateSubset = rateSubset.merge(
vmxSubset[['sourceTypeID','fuelTypeID','VMTmix']],
on = ['sourceTypeID','fuelTypeID']
)
# average and return
rateSubset['emRate'] = rateSubset.ratePerDistance*\
rateSubset.VMTmix/vmxSubset.VMTmix.sum()
return rateSubset.groupby(
['countyID','timeIntervalID','pollutantID','sourceTypeID',
'fuelTypeID','roadTypeID','avgSpeedBinID']
).emRate.sum().reset_index()
def processGroup(rates,vmx,vmap,vt,hour,speed,rt,fips,group):
grRate = groupRates(rates,vmx,vmap[vt],fips,hour,rt,speed)
group = group.drop(columns = ['vehType']).merge(
grRate,
on = ['timeIntervalID','avgSpeedBinID','roadTypeID','countyID']
)
group['emquant'] = group.vmt*group.emRate
return group.groupby(
['linkID','pollutantID','sourceTypeID','fuelTypeID']
).emquant.sum().reset_index()
def main():
parser = ArgumentParser()
parser.add_argument('vmxPath',help = 'path to the vehicle mix CSV')
parser.add_argument('ratesPath',help = 'path to the emission rates CSV')
parser.add_argument('year',type = int,help = 'emission rates year')
parser.add_argument('numCPU',type = int,help = 'number of CPUs to use')
parser.add_argument('--dayOfTheWeek',default = 'WK')
args = parser.parse_args()
vmxPath = args.vmxPath
ratesPath = args.ratesPath
year = args.year
numCPU = args.numCPU
dayOfTheWeek = args.dayOfTheWeek
# read the vmt
vmt = pd.read_csv('linkVMT.csv')
# read the links metadata and merge
links = | pd.read_csv('links.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Evaluate intra-class correlation coefficients (ICC) for each radiomics feature by comparing extractions from
each set of segmentations (e.g. normal, eroded, dilated segmentations).
Not for clinical use.
SPDX-FileCopyrightText: 2021 Medical Physics Unit, McGill University, Montreal, CAN
SPDX-FileCopyrightText: 2021 <NAME>
SPDX-FileCopyrightText: 2021 <NAME>
SPDX-License-Identifier: MIT
"""
import os
from os.path import join
import pandas as pd # This module is used to read CSV files of radiomics features
import numpy as np
import pingouin as pg # This statistical module is used to assess ICCs
import scipy.io as sio # This module is used to save ICC as .mat files
# INPUTS = PATHS FOR RADIOMICS FEATURES EXPORTED FOR EACH SEGMENTATION AND EACH PREPROCESSING COMBINATION
# OUTPUTS = ICC FOR EACH PREPROCESSING COMBINATION
modality = 'MRI_SEQUENCE' # insert name of MRI sequence of interest (e.g. DCE2, ADC, DWI, etc.)
basepath = 'MYPROJECTFILEPATH/'
# Paths to each segmentations directory
mypath = 'MYPROJECTFILEPATH/PREPROCESSED_EXTRACTIONS/'+modality+'/SEG/'
mypathERO = 'MYPROJECTFILEPATH/PREPROCESSED_EXTRACTIONS/'+modality+'/EROSEG/'
mypathDIL = 'MYPROJECTFILEPATH/PREPROCESSED_EXTRACTIONS/'+modality+'/DILSEG/'
mypathsave = 'MYPROJECTFILEPATH/SAVE/'+modality
# Verify if file exists, and create it if not
if not os.path.isdir(mypathsave):
os.makedirs(mypathsave)
# List files in segmentation directories with files containing radiomics features
# of all patients for each set of preprocessing parameters
pathlabels = os.listdir(mypath)
pathEROlabels = os.listdir(mypathERO)
pathDILlabels = os.listdir(mypathDIL)
# Loop over sub-folders with each set of preprocessing parameters
for label in pathlabels:
dir1 = join(mypath, label)
listfiles = os.listdir(dir1)
dir2 = join(mypathERO, label)
listfiles2 = os.listdir(dir2)
dir3 = join(mypathDIL, label)
listfiles3 = os.listdir(dir3)
# get the current label
current_label = label
print("Current Label = ",current_label)
i, j = 0, 0
# Loop over data from all patients in sub-sub-folder
for file in listfiles:
# Make sure this patient's features were extracted for each segmentation
if file in listfiles2 and file in listfiles3:
# Read CSV file of radiomics features for this patient
global_feature = pd.read_csv(join(dir1,file), delimiter=',')
# Extract radiomics features (37 is the first element which is a radiomics feature for Pyradiomics extractions)
global_values = global_feature.iloc[37:-1,1]
global_values = global_values.astype(float)
global_values = np.array(global_values.values)
# Extract names of radiomics features e.g. original_gldm_DependenceEntropy
global_names = global_feature.iloc[37:-1,0]
global_names = np.array(global_names.values)
# Get all radiomics features with header for this patient
global_feature = np.vstack((global_names, global_values))
global_feature = pd.DataFrame(global_feature)
new_hd = global_feature.iloc[0]
new_hd.iloc[:] = new_hd.iloc[:].astype(str)
global_feature = global_feature[1:]
global_feature.columns = new_hd
global_feature.reset_index(drop = True)
rescaled_feature = global_feature # This is the final Data Frame we use for the normal segmentations
# Repeat for eroded segmentations
global_featureERO = pd.read_csv(join(dir2,file), delimiter=',')
global_valuesERO = global_featureERO.iloc[37:-1,1]
global_valuesERO = global_valuesERO.astype(float)
global_valuesERO = np.array(global_valuesERO.values)
global_namesERO = global_featureERO.iloc[37:-1,0]
global_namesERO = np.array(global_namesERO.values)
global_featureERO = np.vstack((global_namesERO, global_valuesERO))
global_featureERO = pd.DataFrame(global_featureERO)
new_hdERO = global_featureERO.iloc[0]
new_hdERO.iloc[:] = new_hdERO.iloc[:].astype(str)
global_featureERO = global_featureERO[1:]
global_featureERO.columns = new_hdERO
            global_featureERO = global_featureERO.reset_index(drop = True)
rescaled_featureERO = global_featureERO # This is the final Data Frame we use for eroded segmentations
# Repeat for dilated segmentations
global_featureDIL= pd.read_csv(join(dir3,file), delimiter=',')
global_valuesDIL= global_featureDIL.iloc[37:-1,1]
global_valuesDIL= global_valuesDIL.astype(float)
global_valuesDIL= np.array(global_valuesDIL.values)
global_namesDIL= global_featureDIL.iloc[37:-1,0]
global_namesDIL= np.array(global_namesDIL.values)
global_featureDIL= np.vstack((global_namesDIL, global_valuesDIL))
global_featureDIL= | pd.DataFrame(global_featureDIL) | pandas.DataFrame |
import os
import inspect
import config
from case_trends_finder import geo_transmission_analyzer
from simulation import Simulation
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from sklearn.metrics import mean_squared_error
import pickle
import skopt
import skopt.plots
import matplotlib
from matplotlib import pyplot as plt
from datetime import datetime
from pathlib import Path
import gc
import warnings
warnings.filterwarnings("ignore")
class SimulationData:
def __init__(self):
self.country_code = None
self.state_name = None
self.state_data_orig = None
self.state_data = None
self.country_level_projection = None
self.n_population = None
self.state_population = None
self.actual_testing_capacity = None
self.case_rate = None
self.adjusted_case_rate = None
self.scaling_factor = None
self.wave1_weeks = None
self.min_initial_infection = None
self.transmission_prob = None
self.transmission_control = None
self.transmission_control_range = None
self.wave1_weeks_range = None
self.intervention_scores = None
self.expected_rates = None
self.avg_time_to_peaks = None
self.mean_relative_change_rates = None
self.intervention_influence_pctg = None
self.fitment_days = None
self.test_days = None
self.projection_days = None
self.future_projection_days = None
self.wave1_start_date = None
self.wave1_peak_detected = None
self.days_between_disease_waves = None
self.weeks_between_disease_waves = None
self.wave2_peak_factor = None
self.wave2_spread_factor = None
def to_csv(self, csv_name):
attributes = inspect.getmembers(self, lambda a: not(inspect.isroutine(a)))
with open(csv_name, 'w+') as f:
for a in attributes:
if not(a[0].startswith('__') and a[0].endswith('__')):
f.write("%s,%s\n"%(a[0], a[1]))
# Derive incidence rate and fractions of population infected from recent case frequency data
def get_incidence_rate(state_data, rate, population, fitment_days):
# normalized case rate w.r.t. population
rate = rate / population / float(config.infected_and_symptomatic_in_population)
avg_active_cases_x_days_back = state_data.iloc[-fitment_days-3 : -fitment_days+2]['Total_Active'].mean()
avg_daily_cases_x_days_back = state_data.iloc[-fitment_days-3 : -fitment_days+2]['Confirmed'].mean()
# approx fraction of active infected population x days back
active_case_population_fraction_x_days_back = avg_active_cases_x_days_back / population / float(config.infected_and_symptomatic_in_population)
# approx fraction of total infected population x days back
daily_case_population_fraction_x_days_back = avg_daily_cases_x_days_back / population / float(config.infected_and_symptomatic_in_population)
#print ("get_incidence_rate", fitment_days, rate, avg_active_cases_x_days_back, avg_daily_cases_x_days_back)
return rate, active_case_population_fraction_x_days_back, daily_case_population_fraction_x_days_back
# Run simulation:
# - for fitment_days with trial params (during training)
# - for (fitment_days + projection_days) with learned params (during testing / projection)
def simulate(sim_data, learning_phase=False):
testing_capacity = sim_data.actual_testing_capacity * (sim_data.n_population / sim_data.state_population)
derived_case_rate, active_case_population_fraction_x_days_back, daily_case_population_fraction_x_days_back \
= get_incidence_rate(sim_data.state_data, sim_data.adjusted_case_rate, sim_data.state_population,
sim_data.fitment_days)
derived_case_rate *= sim_data.scaling_factor
Simulation.set_config(time_between_consecutive_pcr_tests=14,
attrition_rate=0.05,
initial_antibody_immunity_in_population=0.20,
add_ab=False)
if learning_phase:
n_days = sim_data.fitment_days
else:
#n_days = sim_data.fitment_days + sim_data.projection_days
n_days = sim_data.projection_days
weeks_between_waves = config.gap_weeks_between_disease_waves_default if sim_data.days_between_disease_waves is None else int(round(sim_data.days_between_disease_waves / 7))
simulator = Simulation(sim_data.n_population,
n_days,
sim_data.wave1_weeks,
weeks_between_waves,
derived_case_rate,
active_case_population_fraction_x_days_back,
daily_case_population_fraction_x_days_back,
int(testing_capacity),
transmission_control=sim_data.transmission_control,
transmission_prob=sim_data.transmission_prob,
intervention_influence_pctg=sim_data.intervention_influence_pctg,
wave2_peak_factor = sim_data.wave2_peak_factor,
wave2_spread_factor = sim_data.wave2_spread_factor,
log_results=False
)
# Run the simulation to project the spread of infection
results = simulator.run(learning_phase, n_days=n_days, n_population=sim_data.n_population,
intervention_scores=sim_data.intervention_scores)
daily_stats = []
for dict in results[1]:
daily_stats.append([dict['Daily New Infection'], dict['Infected working in FC and not in quarantine'],
dict['Sent To Quarantine']])
df_results = pd.DataFrame(daily_stats, columns=['new_cases', 'open_infectious', 'quarantined'])
# Using rolling avg of simulation outcome to smoothen the projection
df_results = df_results.rolling(10, min_periods=1).mean()
# Scaling the projection for the state's population
df_results = df_results * (sim_data.state_population / sim_data.n_population)
df_results['total_cases'] = df_results['new_cases'].cumsum(axis=0, skipna=True)
# Accommodate the prior (before the fitment period stat date) total confirmed cases into the projected numbers
df_results['total_cases'] += sim_data.state_data['Total_Confirmed'].iloc[-sim_data.fitment_days]
start_date = sim_data.wave1_start_date
dates = pd.date_range(start_date, periods=len(daily_stats), freq='D')
df_results['date'] = dates
df_results.index = df_results['date']
if sim_data.scaling_factor > 1:
cols = ['new_cases', 'open_infectious', 'quarantined', 'total_cases']
df_results[cols] /= sim_data.scaling_factor
df_results[cols] = df_results[cols].astype(int)
return df_results
# Measure fitment error during parameters learning process
def measure_diff(params, sim_datax, optimize_wave1_weeks):
sim_data = pickle.loads(pickle.dumps(sim_datax))
if optimize_wave1_weeks:
sim_data.transmission_control, sim_data.wave1_weeks = params
else:
sim_data.transmission_control = params[0]
sim_data.wave1_weeks = np.median(sim_data.wave1_weeks_range)
# Optimizing transmission_control and wave1_weeks separately would result in better accuracy, though cost more time
df_results = simulate(sim_data, learning_phase=True)
projected_cases = df_results['total_cases']
    actual_cases = sim_data.state_data['Total_Confirmed'].iloc[-sim_data.fitment_days:]
if sim_data.scaling_factor > 1:
actual_cases /= sim_data.scaling_factor
comparison_span = min(config.fitment_period_max, sim_data.fitment_days) # Days to compare model performance for
weights = 1 / np.arange(1, comparison_span + 1)[::-1] # More weights to recent cases
# Measure error using MSLE / RMSE / MSE
# error = mean_squared_log_error(actual_cases[-comparison_span:], projected_cases[-comparison_span:], weights)
# error = sqrt(mean_squared_error(actual_cases[-comparison_span:], projected_cases[-comparison_span:], weights))
#error = mean_squared_error(actual_cases[-comparison_span:], projected_cases[-comparison_span:], weights)
error = mean_squared_error(actual_cases[-comparison_span:], projected_cases[-comparison_span:])
del sim_data
return error
# Learn best parameters for simulation (transmission prob, wave1_weeks) via random / Bayesian search techniques
def fit_and_project(sim_data, n_calls=40, n_jobs=8):
param_space = [skopt.space.Real(sim_data.transmission_control_range[0], sim_data.transmission_control_range[1],
name='transmission_control', prior='log-uniform')]
optimize_wave1_weeks = True if not sim_data.wave1_peak_detected else False
if optimize_wave1_weeks:
param_space.append(skopt.space.Integer(sim_data.wave1_weeks_range[0], sim_data.wave1_weeks_range[1],
name='wave1_weeks'))
def objective(params):
return measure_diff(params, sim_data, optimize_wave1_weeks)
def monitor(res):
print(len(res.func_vals), sep='', end=',')
print (param_space)
print('\n' + '*' * 100)
print('Learning Iterations # ', sep='', end='')
measurements = skopt.gp_minimize(objective, param_space, callback=[monitor], n_calls=n_calls, n_jobs=n_jobs)
best_score = measurements.fun
best_params = measurements.x
print('\n' + '*' * 100)
# Best parameters
print('Lowest Error Observed: {}'.format(best_score))
print('Best Param(s): {}'.format(best_params))
return measurements
# Learn simulation parameters (transmission prob, wave1_weeks)
def learn_parameters(sim_data, n_calls=40, n_jobs=8, params_export_path=None):
opt_results = fit_and_project(sim_data, n_calls=n_calls, n_jobs=n_jobs)
n_best = 5
error_scores = opt_results.func_vals
best_score_indices = np.argsort(opt_results.func_vals)[:n_best]
print('\n\nBest {} Param(s):'.format(n_best))
top_scores = list()
print('- ' * 50)
for i in best_score_indices:
print('Params: {} | Error: {}'.format(opt_results.x_iters[i], error_scores[i]))
tranmission_prob = opt_results.x_iters[i][0]
wave1_weeks = opt_results.x_iters[i][1] if not sim_data.wave1_peak_detected else int(np.mean(sim_data.wave1_weeks_range))
top_scores.append([error_scores[i], wave1_weeks, tranmission_prob, sim_data.fitment_days, sim_data.test_days])
print('- ' * 50)
df_best_params = pd.DataFrame(top_scores, columns=['error', 'wave1_weeks', 'tranmission_prob', 'fitment_days',
'test_days'])
if params_export_path is not None:
print('Writing simulation params at : {}'.format(params_export_path))
if not os.path.exists(params_export_path.rsplit('/', 1)[0]):
print('Creating {}'.format(params_export_path.rsplit('/', 1)[0]))
os.mkdir(params_export_path.rsplit('/', 1)[0])
df_best_params.to_csv(params_export_path)
return df_best_params['wave1_weeks'].iloc[0], df_best_params['tranmission_prob'].iloc[0],\
df_best_params['fitment_days'].iloc[0], df_best_params['test_days'].iloc[0]
def plot_all(sim_data, simulation_titles, intervention_scores_list):
ylim1, ylim2 = -1, -1
for i, intervention_scores in enumerate(intervention_scores_list):
print('\n')
print(simulation_titles[i] if simulation_titles is not None else 'Simulation # {}'.format(i))
projection_file_name = config.country_simulation_results_path\
.format(sim_data.country_code, str(i+1)) if sim_data.country_level_projection \
else config.state_simulation_results_path.format(sim_data.state_name, str(i+1))
df_results = pd.read_csv(os.path.join(config.base_output_dir, projection_file_name))
df_results.index = pd.to_datetime(df_results['date'])
# plot daily confirmed projections
ylim1_tmp, ylim2_tmp = plot_projection(df_results, sim_data, ylim1, ylim2)
ylim1 = max(ylim1_tmp, ylim1)
ylim2 = max(ylim2_tmp, ylim2)
# Plot projection results against available actual numbers
def plot_projection(df_results, sim_data, ylim1, ylim2):
df_loc = sim_data.state_data_orig.copy()
df_loc['Date'] = pd.to_datetime(df_loc['Date'])
df_loc.index = df_loc['Date']
if sim_data.scaling_factor > 1:
target_cols = ['Confirmed', 'Deceased', 'Recovered', 'Total_Confirmed', 'Total_Deceased', 'Total_Recovered',
'Total_Active']
df_loc[target_cols] /= sim_data.scaling_factor
df_loc[target_cols] = df_loc[target_cols].astype(int)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(18, 6))
df_loc['Confirmed'].plot(title='Daily Confirmed Cases Projection', label='daily confirmed', ax=ax[0])
df_loc['Total_Confirmed'].plot(title='Total Confirmed Cases Projection', label='total confirmed', ax=ax[1])
df_results['new_cases'].plot(label='projection', ax=ax[0], color='darkorange')
df_results['total_cases'].plot(label='projection', ax=ax[1], color='darkorange')
ax[0].legend(loc="upper left")
ax[1].legend(loc="upper left")
ax[0].set_ylim(top=max(ylim1, ax[0].get_ylim()[1]))
ax[1].set_ylim(top=max(ylim2, ax[1].get_ylim()[1]))
fig.tight_layout()
plt.grid()
plt.show()
return ax[0].get_ylim()[1], ax[1].get_ylim()[1]
# Determine the sample population size for the simulation to ensure at least N infections to start with
# This process also determines to what extent the given population size needs to be scaled up (scaling_factor)
def size_projection_population(state_data, case_rate, state_population, fitment_days, min_init_infections):
n_population_max = config.n_population_max
n_population = config.n_population
scaling_factor = 1
#abs_case_rate = get_rate_of_changes(state_data, days_to_consider=fitment_days)
incidence_rate, _, _ = get_incidence_rate(state_data, case_rate, state_population, fitment_days)
# Ensuring that minimum rate yields at least N cases while simulating
rate_multiple = min_init_infections / incidence_rate
if n_population < rate_multiple:
n_population = int(np.ceil(rate_multiple))
if n_population > n_population_max:
scaling_factor = n_population / n_population_max
n_population = n_population_max
print('Case Rate: {}, Incidence Rate: {}, Projection Population: {}, Scaling Factor: {}'.format(case_rate, incidence_rate, n_population, scaling_factor))
return n_population, scaling_factor
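# Illustrative arithmetic for the sizing rule above (hypothetical numbers):
# with min_init_infections = 100 and an incidence_rate of 2e-6, the simulated
# sample must contain at least 100 / 2e-6 = 50,000,000 agents. If
# config.n_population_max were 500,000, the simulation would instead run on
# 500,000 agents with scaling_factor = 50,000,000 / 500,000 = 100: the input
# case counts are multiplied by 100 (extend_infection_data) and the projected
# numbers are divided by 100 again (simulate).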
# Scale daily infection case data and augment it with the corresponding aggregated, normalized intervention scores
def extend_infection_data(country_code, state_data, scaling_factor, intervention_scores_loc):
    # Read country-wise daily intervention scores (aggregated between 0 and 1) - produced by intervention_scorer.ipynb
intv_scores = pd.read_csv(intervention_scores_loc)
country_intv_scores = intv_scores.loc[intv_scores['CountryCode'] == country_code]
country_intv_scores['Date'] = pd.to_datetime(country_intv_scores['Date'])
df_state = pd.merge(state_data, country_intv_scores[['Date', 'aggr_weighted_intv_norm']], how='left', on=['Date'])
df_state['aggr_weighted_intv_norm'].fillna(method='ffill', inplace=True)
# Fill 0 scores with last non-zero score. 0 scores might occur when intervention_scorer.ipynb is run on older data
df_state['aggr_weighted_intv_norm'].replace(to_replace=0, method='ffill', inplace=True)
if scaling_factor > 1:
target_cols = ['Confirmed', 'Deceased', 'Recovered', 'Total_Confirmed', 'Total_Deceased', 'Total_Recovered',
'Total_Active']
df_state[target_cols] *= scaling_factor
df_state[target_cols] = df_state[target_cols].astype(int)
return df_state
# Load stored params (transmission prob, wave1_weeks)
def get_parameters(params_export_path):
df_best_params = pd.read_csv(params_export_path)
return df_best_params['wave1_weeks'].iloc[0], df_best_params['tranmission_prob'].iloc[0], \
df_best_params['fitment_days'].iloc[0], df_best_params['test_days'].iloc[0]
# Run simulations for different rates (projected, high, low) for each of the given intervention setups
def run_simulations(sim_data, intervention_scores_list, simulation_titles=None):
for i, intervention_scores in enumerate(intervention_scores_list):
sim_data_copy = pickle.loads(pickle.dumps(sim_data))
sim_data_copy.intervention_scores = intervention_scores
df_results = simulate(sim_data_copy)
projection_file_name = config.country_simulation_results_path\
.format(sim_data.country_code, str(i+1)) if sim_data.country_level_projection \
else config.state_simulation_results_path.format(sim_data.state_name, str(i+1))
df_results.to_csv(os.path.join(config.base_output_dir, projection_file_name))
del sim_data_copy
sim_data_file_name = config.country_simulation_data_path.format(sim_data.country_code) \
if sim_data.country_level_projection else config.state_simulation_data_path.format(sim_data.state_name)
sim_data_file = open(os.path.join(config.base_output_dir, sim_data_file_name), 'wb')
pickle.dump(sim_data, sim_data_file)
gc.collect()
# Plotting projections
if not config.sagemaker_run:
sim_data_loaded = pickle.load(open(os.path.join(config.base_output_dir, sim_data_file_name), 'rb'))
plot_all(sim_data_loaded, simulation_titles, intervention_scores_list)
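# --- Illustrative sketch (hypothetical scenario construction) -----------------
# run_simulations expects one intervention-score sequence per scenario. Assuming
# intervention_scores is an iterable of normalized scores where higher means
# stricter interventions, a comparison of scenarios could be built like this:
def _build_intervention_scenarios_sketch(sim_data):
    observed = list(sim_data.intervention_scores)
    relaxed = [s * 0.8 for s in observed]               # 20% weaker interventions
    tightened = [min(1.0, s * 1.2) for s in observed]   # 20% stronger interventions
    titles = ['Current interventions', 'Relaxed', 'Tightened']
    return [observed, relaxed, tightened], titles
# e.g.  scenarios, titles = _build_intervention_scenarios_sketch(sim_data)
#       run_simulations(sim_data, scenarios, simulation_titles=titles)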
# Prepare for projection by learning related parameters (e.g. transmission prob, wave1_weeks, higher bound,
# lower bound, etc.) to run simulation
def prep_projection(country_code, target_state, sim_data, learn_params=True):
intervention_scores_loc = os.path.join(config.base_data_dir, config.intervention_scores_loc)
try:
pd.read_csv(intervention_scores_loc)
except:
print('Error: File Missing: {}!'.format(intervention_scores_loc))
print('Load the latest intervention scores by running interventions_scorer.ipynb first and then run this '
'simulation.')
return None, 1
if sim_data.country_level_projection:
state_cases = os.path.join(config.base_data_dir, config.country_covid19_cases.format(target_state))
params_export_path = os.path.join(config.base_output_dir,
config.country_covid19_params_export_path.format(target_state))
else:
state_cases = os.path.join(config.base_data_dir, config.state_covid19_cases.format(country_code, target_state))
params_export_path = os.path.join(config.base_output_dir,
config.state_covid19_params_export_path.format(country_code, target_state))
try:
df_state = | pd.read_csv(state_cases) | pandas.read_csv |
#Import modules
import os
import pandas as pd
import numpy as np
from pandas import DatetimeIndex
import dask
import scipy
from scipy.optimize import minimize, LinearConstraint
import time
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import pickle
#Define Column Name
indexName = 'date'
indexExpiry = 'optionExpiry'
indexTenor = 'underlyingTerm'
indexStrike = 'Strike'
indexRelStrike = 'RelativeStrike'
def getTTMFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[0])
def getMoneynessFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[1])
def readfile(file):
print("file")
print(file)
def iterateOnFolderContent(folderName):
for elt in os.scandir(folderName):
if os.DirEntry.is_dir(elt):
print("Folder")
print(elt)
iterateOnFolderContent(elt)
else :
readfile(elt)
def parseTerm(stringTerm):
if 'M' == stringTerm[-1]:
return float(stringTerm[:-1])/12
elif 'Y' == stringTerm[-1]:
return float(stringTerm[:-1])
else :
raise Exception("Can not parse term")
def parseTenor(row):
return [parseTerm(row['underlyingTerm']), parseTerm(row['optionExpiry'])]
def smileFromSkew(skew):
atmVol = skew['A']
#smile = atmVol + skew[skewShift]
#return smile#.append(skew.drop(smile.index))
return atmVol + skew.drop('A')
def parseStrike(relStrike):
if relStrike.name[3] == 'A':
return relStrike['forward']
if "+" in relStrike.name[3]:
shift = int(relStrike.name[3].split("+")[1])
return relStrike['forward'] + shift/1000
if "-" in relStrike.name[3]:
shift = int(relStrike.name[3].split("-")[1])
return relStrike['forward'] - shift/1000
raise Exception(' Can not parse Strike ')
#Grid points (expiry, tenor) shared by all dates across the history
def intersectionGrid(grid) :
nbDates = grid.index.get_level_values(0).unique().shape[0]
if nbDates <= 1:
return grid.index.droplevel(0)
else :
midDate = grid.index.get_level_values(0).unique()[int(nbDates/2)]
g1 = grid[grid.index.get_level_values(0) < midDate]
g2 = grid[grid.index.get_level_values(0) >= midDate]
return intersectionGrid(g1).intersection(intersectionGrid(g2))
def splitTrainTestDataRandomly(gridHistory, trainingSetPercentage):
nbDates = gridHistory.index.get_level_values(0).unique().shape[0]
trainingDates = np.random.choice(gridHistory.index.get_level_values(0).unique(),
replace=False,
size=int(nbDates * trainingSetPercentage))
trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
testingData = gridHistory.drop(trainingData.index)
trainingData.index = trainingData.index.droplevel([1,2])
testingData.index = testingData.index.droplevel([1,2])
return trainingData, testingData
def splitTrainTestDataChronologically(gridHistory, trainingSetPercentage):
firstTestingDate = int(gridHistory.index.get_level_values(0).unique().shape[0]
* trainingSetPercentage)
trainingDates = gridHistory.index.get_level_values(0).unique()[:firstTestingDate]
trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
testingData = gridHistory.drop(trainingData.index)
trainingData.index = trainingData.index.droplevel([1,2])
testingData.index = testingData.index.droplevel([1,2])
return trainingData, testingData
def sampleBatchOfDays(dataSet, nbDrawn):
trainingDates = np.random.choice(dataSet.index.get_level_values(0).unique(),
replace=False,
size=nbDrawn)
return dataSet.loc[trainingDates, :]
def splitHistory(history, colName):
return pd.pivot_table(history,
values = colName,
index = history.index.names,
columns=['Expiry','Tenor'])
def extractDataFromCSV(dataSetPath):
#Read csv file
data = pd.read_csv(dataSetPath)
#Parse tenor and expiry as float years
data['Tenor'],data['Expiry'] = zip(*data.apply(parseTenor,axis=1))
#Parse date as a datetime
data[indexName] = pd.to_datetime(data['businessDate'], dayfirst=True)
#Set Index as as a three dimension vector and sort observation
indexedData = data.set_index([indexExpiry, indexTenor, indexName]).sort_index()
#Keep relevant features
#Columns used for representing a Strike Value
skewShift = [shift for shift in indexedData.columns if ('A' in shift )]#and 'A' != shift
#Other Columns to keep
otherColumns = ['forward', 'Tenor', 'Expiry']
#Get columns indexed by a relative strike
skewHistory = indexedData[skewShift + otherColumns]#.apply(smileFromSkew,axis=1)
#Merge with other useful columns
#Stacking Smile
#Left outer Join on (tenor, expiry, date)
joinColumns = skewHistory.index.names
leftTable = skewHistory.drop(otherColumns, axis = 1).stack().rename("Vol")#Features depending on strike value
leftTable.index.names = [leftTable.index.names[0],
leftTable.index.names[1],
leftTable.index.names[2],
'RelativeStrike']
formattedHistory = leftTable.reset_index().merge(skewHistory[otherColumns].reset_index(),
on=joinColumns,
validate = "m:1").set_index(leftTable.index.names).sort_index()
#Convert strike shift as a float from a stringTerm
formattedHistory[indexStrike] = formattedHistory.apply(parseStrike,axis=1)
return formattedHistory
def equalDf(df1, df2):
if df1.shape == df2.shape :
if np.sum(np.isnan(df1.values)) != np.sum(np.isnan(df2.values)) :
print("Not the same number of nan")
return False
tol = 1e-6
gap = np.nansum(np.abs(df1.values - df2.values))
if gap < tol :
return True
else :
print("Large df error : ", gap)
return False
print("Not the same shape")
return False
def sampleSwaptionsToDelete(dataSet, completionRate):
return dataSet.iloc[0].sample(frac = completionRate).index
def removeSwaptionsToDelete(dataSet):
listToDelete = [(0.08333333333333333,0.25),(0.08333333333333333,10.0),
(0.08333333333333333,30.0),(0.5,2.0),(0.5,15.0),
(5.0,1.0),(5.0,20.0),(10.0,5.0)]
return dataSet.iloc[0].index.difference(listToDelete)
#Different from scikit-learn's MinMaxScaler:
#min and max are computed over the whole dataset, not column-wise
class customMinMaxScale:
def __init__(self, feature_range = (0,1)):
self.min = feature_range[0]
self.max = feature_range[1]
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
self.datasetMin = dataset.min().min()
if enforceDataSetMin is not None :
self.datasetMin = min(enforceDataSetMin, self.datasetMin)
self.datasetMax = dataset.max().max()
if enforceDataSetMax is not None :
self.datasetMax = max(enforceDataSetMax, self.datasetMax)
return
def transform(self, dataset):
scale = (self.max - self.min) / (self.datasetMax - self.datasetMin)
return (dataset - self.datasetMin) * scale + self.min
def inverse_transform(self, scaledDataset):
scale = (self.max - self.min) / (self.datasetMax - self.datasetMin)
return (scaledDataset - self.min) / scale + self.datasetMin
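# --- Illustrative sketch (assumed toy data, not part of the original code) ----
# The point of customMinMaxScale above: a single global min/max is used for the
# whole DataFrame, so the level ordering between grid points is preserved,
# whereas scikit-learn's MinMaxScaler rescales every column to [0, 1] on its own.
def _compare_scalers_sketch():
    toy = pd.DataFrame({'1Y': [0.20, 0.25, 0.30], '10Y': [0.40, 0.45, 0.50]})
    globalScaler = customMinMaxScale()
    globalScaler.fit(toy)
    print(globalScaler.transform(toy))   # the 10Y column stays above the 1Y column
    print(pd.DataFrame(MinMaxScaler().fit_transform(toy), columns=toy.columns))
    # column-wise scaling maps both columns onto the same [0, 1] range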
#Encapsulation class for Sklearn Standard scaling
class customMeanStdScale:
def __init__(self, feature_range = (0,1)):
self.scalerList = []
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
self.scalerList = [StandardScaler() for i in range(tupleSize)]
for k in range(tupleSize):
funcAccess = lambda x : x[k]
scaler = self.scalerList[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler.fit(dfElt)
else :
self.scalerList = []
self.scalerList.append(StandardScaler())
self.scalerList[0].fit(dataset)
return
def transformSingleDf(self, scaler, dfElt):
totalVariance = np.sum(scaler.var_)
if totalVariance <= 1e-6 : #Avoid mean scaling for constant data
return dfElt
if type(dfElt) == type(pd.Series()):
return pd.Series(np.ravel(scaler.transform(dfElt.values.reshape(1, -1))),
index = dfElt.index).rename(dfElt.name)
return pd.DataFrame(scaler.transform(dfElt),
index = dfElt.index,
columns = dfElt.columns)
def transform(self, dataset):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
scaledDfList = []
for k in range(tupleSize):
funcAccess = lambda x : x[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler = self.scalerList[k]
scaledDfList.append(np.ravel(self.transformSingleDf(scaler, dfElt).values))
#Flattened list of tuples
tupleList= list(zip(*scaledDfList))
#Merge all datasets into a single structure
if dataset.ndim==2 :
reshapedList = [tupleList[(i*dataset.shape[1]):((i+1)*dataset.shape[1])] for i in range(dataset.shape[0])]
return pd.DataFrame(reshapedList,
index = dataset.index,
columns = dataset.columns)
else :
reshapedList = tupleList
return pd.Series(reshapedList, index = dataset.index)
else :
return self.transformSingleDf(self.scalerList[0], dataset)
return None
def inverTransformSingleDf(self, scaler, dfElt):
totalVariance = np.sum(scaler.var_)
if totalVariance <= 1e-6 : #Avoid mean scaling for constant data
return dfElt
if type(dfElt) == type(pd.Series()):
return pd.Series(np.ravel(scaler.inverse_transform(dfElt.values.reshape(1, -1))),
index = dfElt.index).rename(dfElt.name)
return pd.DataFrame(scaler.inverse_transform(dfElt),
index = dfElt.index,
columns = dfElt.columns)
def inverse_transform(self, scaledDataset):
hasTupleElt = (type(scaledDataset.iloc[0,0] if scaledDataset.ndim==2 else scaledDataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(scaledDataset.iloc[0,0] if scaledDataset.ndim==2 else scaledDataset.iloc[0])
scaledDfList = []
for k in range(tupleSize):
funcAccess = lambda x : x[k]
dfElt = scaledDataset.applymap(funcAccess) if (type(scaledDataset) != type(pd.Series())) else scaledDataset.map(funcAccess)
scaler = self.scalerList[k]
scaledDfList.append(np.ravel(self.inverTransformSingleDf(scaler, dfElt).values))
#Flattened list of tuples
tupleList= list(zip(*scaledDfList))
#Merge all datasets into a single structure
if scaledDataset.ndim==2 :
reshapedList = [tupleList[(i*scaledDataset.shape[1]):((i+1)*scaledDataset.shape[1])] for i in range(scaledDataset.shape[0])]
return pd.DataFrame(reshapedList,
index = scaledDataset.index,
columns = scaledDataset.columns)
else :
reshapedList = tupleList
return pd.Series(reshapedList, index = scaledDataset.index)
else :
return self.inverTransformSingleDf(self.scalerList[0], scaledDataset)
return None
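# --- Illustrative sketch (assumed toy data) ------------------------------------
# customMeanStdScale also accepts tuple-valued cells, e.g. the (TTM, log-moneyness)
# coordinates built elsewhere in this file: each tuple component is standardized
# with its own StandardScaler and the components are zipped back into tuples.
def _scale_coordinates_sketch():
    coords = pd.DataFrame([[(0.25, -0.1), (1.0, 0.0)],
                           [(0.30, -0.2), (1.1, 0.1)]])
    scaler = customMeanStdScale()
    scaler.fit(coords)
    return scaler.transform(coords)   # same shape, tuple cells, per-component z-scores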
#Encapsulation class for Sklearn min max scaling
class standardMinMaxScale(customMeanStdScale):
def __init__(self, feature_range = (0,1)):
super().__init__()
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
self.scalerList = [MinMaxScaler() for i in range(tupleSize)]
for k in range(tupleSize):
funcAccess = lambda x : x[k]
scaler = self.scalerList[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler.fit(dfElt)
else :
self.scalerList = []
self.scalerList.append(MinMaxScaler())
self.scalerList[0].fit(dataset)
return
def selectLessCorrelatedFeatures(featureCorr, nbPoints):
objectiveFunction = lambda x : x.T @ featureCorr.values @ x
gradient = lambda x : (featureCorr.values + featureCorr.values.T) @ x
hessian = lambda x : featureCorr.values + featureCorr.values.T
nbRestart = 5
x0s = np.random.uniform(size=(nbRestart , featureCorr.shape[1]))
x0s = x0s * nbPoints / np.sum(x0s, axis = 1, keepdims=True)
bestSol = x0s[0,:]
bestVar = featureCorr.shape[1]
bounds = [[0,1]] * featureCorr.shape[1]
budgetAllocation = LinearConstraint(np.ones((1,featureCorr.shape[1])), [nbPoints], [nbPoints], keep_feasible = True)
for k in range(nbRestart):
res = minimize(objectiveFunction, x0s[k,:],
bounds = bounds,
constraints = budgetAllocation,
method = "trust-constr",
jac = gradient,
hess = hessian)
if (res.fun < bestVar) or (k==0) :
bestSol = res.x
bestVar = res.fun
print("Attempt no ", k, " ; best solution : ", bestSol, " ; best inertia : ", bestVar)
topnbPointsValue = -(np.sort(-bestSol)[nbPoints - 1])
optimalAllocation = pd.Series(bestSol, index = featureCorr.index)
return optimalAllocation[optimalAllocation >= topnbPointsValue].index
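# Illustrative usage (hypothetical inputs): selectLessCorrelatedFeatures expects a
# square feature-correlation DataFrame and a budget of points to keep, and returns
# the index of the selected features. Assuming trainVol holds one column per grid
# point, a plausible call would be:
#     keptPoints = selectLessCorrelatedFeatures(trainVol.corr().abs(), nbPoints=8)
#     observedSubset = trainVol[keptPoints]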
def isCSVFile(filename):
extension = filename[-3:]
return (extension == "csv")
#These classes are responsible for :
# - passing the right data to the model for training
# - converting data back to the original format for plotting
class datasetATM:
def __init__(self, pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = False):
self.trainingSetPercentage = trainingSetPercentage
self.pathToDataset = pathToDataset
self.activateScaling = scaleFeatures
self.isGridStable = True
self.testVol = None
self.trainVol = None
self.VolSerie = None
self.volScaler = None
self.scaledTrainVol = None
self.scaledTestVol = None
self.testCoordinates = None
self.trainCoordinates = None
self.CoordinatesSerie = None
self.coordinatesScaler = None
self.scaledTrainCoordinates = None
self.scaledTestCoordinates = None
self.testFwd = None
self.trainFwd = None
self.FwdSerie = None
self.fwdScaler = None
self.scaledTrainFwd = None
self.scaledTestFwd = None
self.testStrike = None
self.trainStrike = None
self.StrikeSerie = None
self.loadData()
self.scaleDataSets()
lambdaAppend = (lambda x : x[0].append(x[1]) if x[0] is not None else None)
self.fullHistory = list(map(lambdaAppend, zip(self.getTrainingDataForModel(),self.getTestingDataForModel())))
self.fullScaler = [self.volScaler, self.coordinatesScaler, self.fwdScaler, None]
self.gridSize = self.getTestingDataForModel()[0].shape[1]
return
def loadData(self):
raise NotImplementedError("Abstract class")
return
def sanityCheck(self):
print("Testing formatModelDataAsDataSet")
assert(equalDf(self.testVol.dropna(how="all").head(),
self.formatModelDataAsDataSet(self.getTestingDataForModel())[0].head()))
origData = self.formatModelDataAsDataSet(self.getTrainingDataForModel())
print("Testing coordinates")
assert(equalDf(self.trainCoordinates.head().applymap(lambda x : x[0]),
origData[1].head().applymap(lambda x : x[0])))
assert(equalDf(self.trainCoordinates.head().applymap(lambda x : x[1]),
origData[1].head().applymap(lambda x : x[1])))
print("Testing Forward")
assert(equalDf(self.getTrainingDataForModel()[2].head(),
self.convertRealDataToModelFormat(self.formatModelDataAsDataSet(self.getTrainingDataForModel()))[2].head()))
print("Testing masking function")
maskedDf = self.maskDataset(self.getTrainingDataForModel()[1]).dropna(how="all",axis=1).head()
assert(maskedDf.shape[1] == (self.gridSize - self.maskedPoints.size))
print("Testing convertRealDataToModelFormat")
assert(equalDf(self.trainVol.loc[origData[0].index].head(),
self.formatModelDataAsDataSet(self.convertRealDataToModelFormat(origData))[0].head()))
print("Success")
return
    #When the grid is not fixed - i.e. the volatility times to maturity slide from day to day -
#we need to decide which instruments can be compared between two dates
def decideInvestableInstruments(self):
coordinatesDf = self.formatModelDataAsDataSet(self.getDataForModel())[1]
pairIndexHistory = []#series of pair of index
nextTTMDf = coordinatesDf.shift(-1).dropna(how = "all")
for serie in coordinatesDf.head(-1).iterrows():
currentDay = serie[1]
nextDay = nextTTMDf.loc[serie[0]]
currentRankForHedgeablePoints = currentDay.index
nextRankForHedgeablePoints = nextDay.index
pairIndexHistory.append((currentRankForHedgeablePoints, nextRankForHedgeablePoints))
pairIndexHistory.append((nextRankForHedgeablePoints, nextRankForHedgeablePoints))
pairIndexHistory = pd.Series(pairIndexHistory, index = coordinatesDf.index)
return pairIndexHistory
#List Format : First position vol, second position coordinates, third position forward, fourth position strike
def getTestingDataForModel(self):
return [self.scaledTestVol, self.scaledTestCoordinates, self.scaledTestFwd, self.testStrike]
def getTrainingDataForModel(self):
return [self.scaledTrainVol, self.scaledTrainCoordinates, self.scaledTrainFwd, self.trainStrike]
def getDataForModel(self, dates = None):
if dates is None :
return self.fullHistory
funcExtractDate = lambda x : x.loc[dates] if x is not None else None
return list(map(funcExtractDate, self.fullHistory))
    #Transform synthetic surfaces into the model data format
#Name of surfaces should be the date
def convertRealDataToModelFormat(self, unformattedSurface):
if(self.activateScaling):
if (type(unformattedSurface)==type(list())) and (len(unformattedSurface)==4):
lambdaTransform = lambda x : x[0] if x[1] is None else x[1].transform(x[0])
return list(map(lambdaTransform, zip(unformattedSurface, self.fullScaler)))
elif (type(unformattedSurface)!=type(list())) :
return self.volScaler.transform(unformattedSurface)
else :
                raise ValueError("Can not format as model data")
return
return unformattedSurface
    #Format data returned by a model back into the dataset format
    #For instance, variations are transformed into levels using yesterday's volatilities
def formatModelDataAsDataSet(self, modelData):
if(self.activateScaling):
if (type(modelData)==type(list())) and (len(modelData)==4):
lambdaTransform = lambda x : x[0] if x[1] is None else x[1].inverse_transform(x[0])
return list(map(lambdaTransform, zip(modelData, self.fullScaler)))
elif (type(modelData)!=type(list())) :
return self.volScaler.inverse_transform(modelData)
else :
                raise ValueError("Can not format as model data")
return
return modelData
def scaleDataSets(self):
if(self.activateScaling):
#Define MinMax scaling for volatility
self.volScaler = customMeanStdScale() #customMinMaxScale()
self.volScaler.fit(self.trainVol, enforceDataSetMin = 0)#Positive volatilities of course
self.scaledTrainVol = self.volScaler.transform(self.trainVol)
self.scaledTestVol = self.volScaler.transform(self.testVol)
#Define MinMax scaling for volatility
self.coordinatesScaler = customMeanStdScale() #customMinMaxScale()
            self.coordinatesScaler.fit(self.trainCoordinates, enforceDataSetMin = 0)#Lower bound only used by the min-max scaler variant
self.scaledTrainCoordinates = self.coordinatesScaler.transform(self.trainCoordinates)
self.scaledTestCoordinates = self.coordinatesScaler.transform(self.testCoordinates)
#Define MinMax scaling for forward swap rates
self.fwdScaler = customMeanStdScale() # customMinMaxScale()
self.fwdScaler.fit(self.trainFwd)
self.scaledTrainFwd = self.fwdScaler.transform(self.trainFwd)
self.scaledTestFwd = self.fwdScaler.transform(self.testFwd)
else :
self.scaledTrainVol = self.trainVol
self.scaledTestVol = self.testVol
self.scaledTrainCoordinates = self.trainCoordinates
self.scaledTestCoordinates = self.testCoordinates
self.scaledTrainFwd = self.trainFwd
self.scaledTestFwd = self.testFwd
return
def getATMDataFromCSV(dataSetPath, trainingSetPercentage=0.8):
formattedHistory = extractDataFromCSV(dataSetPath)
#Filter only ATM volatility
ATMHistory = (formattedHistory[formattedHistory.index.get_level_values(indexRelStrike)=='A']
.reorder_levels([indexName, indexExpiry, indexTenor, indexRelStrike])
.sort_index())
#Remove strike from index as we consider only ATM
ATMHistory.index = ATMHistory.index.droplevel(3)
#Get Expiry and tenors shared by all dates
commonGridPoints = intersectionGrid(ATMHistory)
#Get indexer for multiindex
idx = pd.IndexSlice
#Filter data for Expiry and tenors common to all dates
commonATMHistory = ATMHistory.loc[idx[:,commonGridPoints.get_level_values(0),
commonGridPoints.get_level_values(1)],:]
#Feeding Data
#Take the first 80% dates as training set and the remaining ones as testing set
trainTmp,testTmp = splitTrainTestDataChronologically(commonATMHistory,trainingSetPercentage)
#Separate features between volatility, forward rate and Strike
testVol = splitHistory(testTmp,"Vol")
trainVol = splitHistory(trainTmp,"Vol")
testFwd = splitHistory(testTmp,"forward")
trainFwd = splitHistory(trainTmp,"forward")
testStrike = None
trainStrike = None
indexFunc = lambda x : pd.Series(x.index.values,
index = x.index)
trainCoordinates = trainVol.apply(indexFunc, axis=1)
testCoordinates = testVol.apply(indexFunc, axis=1)
trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
testVol = pd.DataFrame(testVol.values, index=testVol.index)
trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)
testCoordinates = pd.DataFrame(testCoordinates.values, index=testCoordinates.index)
return testVol, trainVol, testFwd, trainFwd, testCoordinates, trainCoordinates, testStrike, trainStrike
class dataSetATMCSV(datasetATM):
def __init__(self, pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = False):
self.nbExpiry = 0
self.nbTenors = 0
self.minExpiry = minExpiry
self.expiryTenorToRankSerie = None
super().__init__(pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = scaleFeatures)
listTokeep = [(0.08333333333333333,0.25),(0.08333333333333333,10.0),
(0.08333333333333333,30.0),(0.5,2.0),(0.5,15.0),
(5.0,1.0),(5.0,20.0),(10.0,5.0)]
self.setMaskedPoints(listTokeep)
def setMaskedPoints(self, completionPoints):
# self.maskedPoints = sampleSwaptionsToDelete(self.getTestingDataForModel(),
# completionRate)
fullObs = self.getTestingDataForModel()[1]
self.maskedPoints = fullObs.columns.difference(completionPoints)
if self.isGridStable :#Surface coordinates are the same for each day
            #Matrix where True indicates that this point must be completed by the model (i.e. hidden from it), False otherwise
maskMatrix = pd.Series(False, index = self.expiryTenorToRankSerie.index)
maskMatrix.loc[fullObs.iloc[0].loc[self.maskedPoints]] = True
self.maskSerie = pd.Series(maskMatrix.values, index = self.expiryTenorToRankSerie.values)
self.maskMatrix = maskMatrix.unstack(level=-1)
#Return a deep copy with masked values
def maskDataset(self, completeDataset):
maskedRank = self.maskedPoints
maskedDataset = completeDataset.copy()
if completeDataset.ndim == 1 :
maskedDataset.loc[maskedRank] = np.NaN
elif completeDataset.ndim == 2 :
maskedDataset[maskedRank] = np.NaN
return maskedDataset
def removeShortestExpiry(self, dataset):
if dataset is None :
return
        #remove data with expiry shorter than minExpiry
hasExpiryColumn = ("Expiry" in dataset.columns.names)
columnsFilter = ((dataset.columns.get_level_values("Expiry")>=self.minExpiry) if hasExpiryColumn else
self.expiryTenorToRankSerie[self.expiryTenorToRankSerie.index.get_level_values("Expiry")>=self.minExpiry].values)
return dataset.filter(items=dataset.columns[columnsFilter])
def loadData(self):
tmp = getATMDataFromCSV(self.pathToDataset, self.trainingSetPercentage)
self.expiryTenorToRankSerie = pd.Series(tmp[4].columns,
index = pd.MultiIndex.from_tuples(tmp[4].iloc[0].values,
names=('Expiry', 'Tenor')))
self.expiryTenorToRankSerie = self.expiryTenorToRankSerie[self.expiryTenorToRankSerie.index.get_level_values("Expiry")>=self.minExpiry]
self.testVol = self.removeShortestExpiry(tmp[0])
self.trainVol = self.removeShortestExpiry(tmp[1])
self.testCoordinates = self.removeShortestExpiry(tmp[4])
self.trainCoordinates = self.removeShortestExpiry(tmp[5])
self.testFwd = self.removeShortestExpiry(tmp[2])
self.trainFwd = self.removeShortestExpiry(tmp[3])
self.testStrike = self.removeShortestExpiry(tmp[6])
self.trainStrike = self.removeShortestExpiry(tmp[7])
self.nbExpiry = self.trainFwd.columns.get_level_values("Expiry").unique().size
self.nbTenors = self.trainFwd.columns.get_level_values("Tenor").unique().size
self.gridSize = self.trainFwd.columns.size
return
def datasetSummary(self):
print("Number of days in dataset",
self.getDataForModel()[0].shape[0])
print("Number of days for testing", self.getTestingDataForModel()[0].shape[0])
print("Number of days for training", self.getTrainingDataForModel()[0].shape[0])
print("Working on ATM volatility level")
print("Number of points in the grid : ", self.gridSize)
print("Number of expiries : ", self.nbExpiry)
print("List : ", self.getTrainingDataForModel()[2].columns.get_level_values("Expiry").unique())
print("Number of tenors : ", self.nbTenors)
print("List : ", self.getTrainingDataForModel()[2].columns.get_level_values("Tenor").unique())
return
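# --- Illustrative usage sketch (hypothetical path and settings) ----------------
# How the pieces above are meant to fit together: build the dataset wrapper,
# inspect the grid, then hide the completion points before feeding a model.
def _atm_dataset_sketch():
    dataset = dataSetATMCSV("swaptionATMHistory.csv",   # hypothetical CSV path
                            trainingSetPercentage=0.8,
                            minExpiry=0.5,
                            completionRate=0.8,
                            scaleFeatures=True)
    dataset.datasetSummary()
    scaledTestVol = dataset.getTestingDataForModel()[0]
    maskedTestVol = dataset.maskDataset(scaledTestVol)  # NaN where the model must complete
    return maskedTestVol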
def getATMDataFromPickle(dataSetPath,
trainingSetPercentage=0.8,
minStrikeIndex = 0,
maturityStrikeIndex = 0):
with open(dataSetPath, "rb") as f :
objectRead = pickle.load(f)
def rankCalDays(dfDay):
return dfDay["nBizDays"].rank()
listRank = list(map(rankCalDays, objectRead))
dfRank = pd.concat(listRank)
dfConcat = pd.concat(objectRead)
dfConcat["Rank"] = dfRank
volDf = dfConcat.reset_index().set_index(["index", "Rank"]).drop(["Date", "Forwards", "nBizDays", "nCalDays", "diff Days"], axis=1, errors="ignore").unstack()
volDf.columns = volDf.columns.set_names("Moneyness",level=0)
volDf = volDf.dropna(how="all",axis=1).astype("float64")
fwdDf = dfConcat.reset_index().set_index(["index", "Rank"])["Forwards"].unstack()
coordinatesRankDf = dfConcat.reset_index().set_index(["index", "Rank"])["nBizDays"].unstack()
def bindBizDays(rows):
bizDays = coordinatesRankDf.loc[rows.name].astype("float64")
return pd.Series(list(zip(bizDays[rows.index.get_level_values("Rank")].values / 252.0,
np.log(rows.index.get_level_values("Moneyness").astype("float64")) )),
index = rows.index)
coordinatesDf = volDf.apply(bindBizDays, axis=1)
def getFwd(rowVol):
ttmRank = rowVol.index.get_level_values("Rank")
return pd.Series(fwdDf.loc[rowVol.name, ttmRank].values, index = rowVol.index)
    #For each point in the vol dataframe, look up the corresponding forward
fwdDf = volDf.apply(getFwd, axis=1).dropna(how="all",axis=1).astype("float64")
firstTestingDate = int(volDf.index.shape[0] * trainingSetPercentage)
trainingDates = volDf.index[:firstTestingDate]
trainVol = volDf.loc[trainingDates]
testVol = volDf.drop(trainVol.index)
trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
testVol = pd.DataFrame(testVol.values, index=testVol.index)
trainFwd = fwdDf.loc[trainVol.index]
trainFwd = pd.DataFrame(trainFwd.values, index=trainFwd.index)[trainVol.columns]
testFwd = fwdDf.drop(trainVol.index)
testFwd = | pd.DataFrame(testFwd.values, index=testFwd.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
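# Minimal illustration of the helper above (hypothetical values): the first tuple
# provides the header, the remaining tuples become the rows, e.g.
#     create_dataframe((('date', 'value'),
#                       (Timestamp('2012-01-01'), 100.0),
#                       (Timestamp('2012-02-01'), 101.2)))
# returns a two-row DataFrame with columns ['date', 'value'].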
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
( | Timestamp('2013-11-01 00:00:00') | pandas.Timestamp |
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
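    # Round-trip helper: write x to a temporary msgpack file and read it back,
    # so each test can compare the original object with the decoded result.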
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
            raise nose.SkipTest("numpy can't handle complex128")
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
| tm.assert_almost_equal(x, x_rec) | pandas.util.testing.assert_almost_equal |
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from tqdm import tqdm
import pandas as pd
import numpy as np
import json
import os
path = os.path.dirname(os.path.abspath(__file__))
list_dir = os.listdir(path + '/results/')
stage = [mpimg.imread(path+'/media/stage_{}.png'.format(i)) for i in range(1, 5)]
color = {'BUG2': 'gold', 'PDDRL': 'dodgerblue', 'PDSRL': 'springgreen', 'PDDRL-P': 'indigo', 'PDSRL-P': 'deeppink'}
sel = {'S1': 0, 'S2': 1, 'Su': 2, 'Sl': 3}
splitted_dir = list()
for dir in list_dir:
if dir != 'data' and dir.split('_')[0] != 'BUG2':
splitted_dir.append(dir.split('_'))
sorted_dir = sorted(splitted_dir, key=lambda row: row[1] if row[0] == 'BUG2' else row[3])
print('Dir:', sorted_dir)
sorted_dir = sorted_dir[14:]
for c, directory in tqdm(enumerate(sorted_dir), total=len(sorted_dir)):
with open(path+'/results/'+'_'.join(directory)+'/writer_data.json') as f:
data = json.load(f)
key_list = list(data.keys())
new_key_list = ["/".join(key.split('/')[-2:]) for key in key_list]
for i, key in enumerate(key_list):
data[new_key_list[i]] = data.pop(key)
df = pd.DataFrame(data, dtype=np.float32)
reward = df.iloc[:, df.columns == new_key_list[0]].to_numpy()
new_reward = list()
for i, t in enumerate(reward):
new_reward.append(t[0][-1])
timing = df.iloc[:, df.columns == new_key_list[1]].to_numpy()
new_timing = list()
for i, t in enumerate(timing):
new_timing.append(t[0][-1])
episode = df.iloc[:, df.columns == new_key_list[2]].to_numpy()
new_episode = list()
for i, t in enumerate(episode):
new_episode.append(t[0][-1])
df = pd.DataFrame({new_key_list[0]: list(new_reward), new_key_list[1]: list(new_timing), new_key_list[2]: list(new_episode)}, dtype=np.float32)
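    # Sort so the best row (highest reward, then timing) comes first within each
    # episode; groupby().first() then keeps that row per episode, and [1:] drops
    # the first episode.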
df = df.sort_values([new_key_list[2], new_key_list[0], new_key_list[1]], ascending=[True, False, False])
df = df.groupby(new_key_list[2]).first().reset_index()[1:]
if directory[0] != 'BUG2':
if directory[-1] != 'N':
name = "-".join([directory[0], directory[-1]])
else:
name = directory[0]
c = directory[-2]
else:
name = directory[0]
c = directory[1]
    success_list = list()
    for value in df[new_key_list[0]]:
        if value == 200:
            success_list.append(1)
        else:
            success_list.append(0)
    success_rate = (sum(success_list) / len(success_list)) * 100
    print('Data for', name, 'test simulations:')
    print('Success rate:', success_rate, "%")
print('Episode reward mean:', df[new_key_list[0]].mean())
print('Episode reward std:', df[new_key_list[0]].std())
print('Episode timing mean:', df[new_key_list[1]].mean())
print('Episode timing std:', df[new_key_list[1]].std())
x = pd.DataFrame(data['agent_0/x']).iloc[:, 2].to_numpy().tolist()
y = | pd.DataFrame(data['agent_0/y']) | pandas.DataFrame |
#!/usr/bin/env python3
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import os
import json
import urllib.request
# Time to fetch (set to None to fetch the latest available data)
latest = None
#latest = "2021-05-02T14:30:00+09:00"
# Data retrieval section
class AmedasStation():
def __init__(self, latest=None):
url = "https://www.jma.go.jp/bosai/amedas/data/latest_time.txt"
if latest is None:
latest = np.loadtxt(urllib.request.urlopen(url), dtype="str")
print(latest)
self.latest_time = | pd.to_datetime(latest) | pandas.to_datetime |
import requests
from bs4 import BeautifulSoup
import pandas as pd
import math
class Scraper:
"""
The class Scraper scrapes all apartments for sale from website www.domoplius.lt
"""
def __init__(self, header: dict = {"User-Agent": "Mozilla/5.0"}):
"""
Inits Scraper Class
:param header: information of the browser
"""
self.header = header
def get_url(self, url: str) -> requests:
"""
Get response from given url.
:param url: url of website for scraping.
:return: response
"""
try:
response = requests.get(url, headers=self.header)
except requests.exceptions.RequestException as e:
print(e)
exit()
return response
def get_page_number(self, number_of_items: int) -> int:
"""
        Returns the number of pages needed to cover the required number of items, rounding up.
:param number_of_items: number of items required to scrape.
:return: number of pages
"""
items_per_page = 30
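        # e.g. 31 items / 30 per page = 1.03..., which math.ceil rounds up to 2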
page_number = number_of_items / items_per_page
return math.ceil(page_number)
def collect_information(self, number_of_items: int) -> pd.DataFrame or None:
"""
        Scrape apartment listings from the paginated HTML search results and collect:
        title, area of flat, number of rooms, year of construction, floor, price.
:param number_of_items: number of items required to scrape.
:return: dataframe
"""
title, area, room, year, floor, price = ([] for i in range(6))
try:
for page_no in range(0, self.get_page_number(number_of_items)):
req = self.get_url(
f"https://domoplius.lt/skelbimai/butai?action_type=1&page_nr={page_no}&slist=109410160")
soup = BeautifulSoup(req.text, 'html.parser')
listings = soup.find_all("div", {"class": ["cntnt-box-fixed"]})
for item in listings:
area.extend([value.text.strip(" m²") for value in item.find_all("span", {"title": "Buto plotas (kv. m)"})])
room.extend([value.text.strip(" kamb.") for value in item.find_all("span", {"title": "Kambarių skaičius"})])
year.extend([value.text.strip(" m.") for value in item.find_all("span", {"title": "Statybos metai"})])
floor.extend([value.text.strip(" a.") for value in item.find_all("span", {"title": "Aukštas"})])
title.extend([value.text.strip(" ") for value in item.find_all("h2", {"class": "title-list"})])
price.extend([value.text.strip("Kaina: ") for value in item.find_all("p", {"class": "fl"})])
return pd.DataFrame({
"title": title,
"area": area,
"room": room,
"floor": floor,
"year": year,
"price": price,
})
except AttributeError:
return None
def write_to_csv(self, number_of_items) -> None:
"""
Write dataframe to csv file.
:param number_of_items: number of items required to scrape.
:return: csv file.
"""
all_information = self.collect_information(number_of_items)
| pd.DataFrame(all_information) | pandas.DataFrame |
"""
########################################################################
The azmet_maricopa.py module contains the AzmetMaricopa class, which
inherits from the pyfao56 Weather class in weather.py. AzmetMaricopa
provides specific I/O functionality for obtaining required weather input
data from the Arizona Meteorological Network (AZMET) station in
Maricopa, Arizona.
01/07/2016 Initial Python functions developed by <NAME>
11/04/2021 Finalized updates for inclusion in the pyfao56 Python package
########################################################################
"""
from pyfao56 import Weather
import datetime
import urllib3
import math
import pandas as pd
from .forecast import Forecast
class AzmetMaricopa(Weather):
"""A class for obtaining weather data for Maricopa, Arizona
Obtains and prepares weather data from the Arizona Meteorological
Network (AZMET) station in Maricopa, Arizona. If necessary, obtains
a 7-day weather forecast for Maricopa from the National Digital
Forecast Database (NDFD) using the Forecast class in forecast.py.
Computes ASCE Standardized Reference Evapotranspiration for the
resulting data set. Checks for missing weather data. The class
inherits from the pyfao56 Weather class.
Attributes
----------
rfcrp : str
Type of reference crop - Short ('S') or Tall ('T')
z : float
Weather station elevation (z) (m)
lat : float
Weather station latitude (decimal degrees)
wndht : float
Weather station wind speed measurement height (m)
cnames : list
Column names for wdata
wdata : DataFrame
Weather data as float
index - Year and day of year as string ('yyyy-ddd')
columns - ['Srad','Tmax','Tmin','Tdew','RHmax','RHmin',
'Wndsp','Rain','ETref','MorP']
Srad - Incoming solar radiation (MJ/m2)
Tmax - Daily maximum air temperature (deg C)
Tmin - Daily minimum air temperature (deg C)
Tdew - Daily average dew point temperature (deg C)
RHmax - Daily maximum relative humidity (%)
RHmin - Daily minimum relative humidity (%)
Wndsp - Daily average wind speed (m/s)
Rain - Daily precipitation (mm)
ETref - Daily reference ET (mm)
MorP - Measured ('M') or Predicted ('P') data
Methods
-------
customload(start,end,usefc=True)
Overridden method from the pyfao56 Weather class to provide
customization for loading weather data from the Maricopa AZMET
station and weather forecasts from the National Digital Forecast
Database (NDFD).
"""
def customload(self,start,end,rfcrp='S',usefc=True):
"""Prepare the wdata DataFrame with required weather data.
Parameters
----------
start : str
Simulation start year and doy ('yyyy-ddd')
end : str
Simulation end year and doy ('yyyy-ddd')
rfcrp : str, optional
Define the reference crop (default='S')
usefc : bool, optional
Use the 7-day NDFD weather forecast or not (default=True)
"""
#Define Maricopa weather station parameters
self.rfcrp = rfcrp #Set the reference crop
self.z = 361.000 #Weather station elevation (z) (m)
self.lat = 33.0690 #Weather station latitude (deg)
self.wndht = 3.00000 #Wind speed measurement height (m)
#Retrieve AZMET weather history for Maricopa
print('Retrieving AZMET weather history for Maricopa, AZ...')
today = datetime.datetime.today()
azmet = []
for year in range(1987,int(today.strftime('%Y'))+1):
print('Retrieving {:4d} data...'.format(year))
year2 = ('{:4d}'.format(year))[2:4]
client = urllib3.PoolManager()
url = 'http://ag.arizona.edu/azmet/data/06'+year2+'rd.txt'
page = client.request('GET',url,retries=9999)
weatherdata = page.data.decode('utf-8').split('\n')[:-1]
for line in weatherdata:
if line in ['']: continue
line = line.rstrip().split(',')
lineyear = int(line[0])
linedoy = int(line[1])
if lineyear < 100: lineyear+=1900
mykey = '{:04d}-{:03d}'.format(lineyear,linedoy)
mydict = {}
mydict.update({'Date':mykey})
if 0.0 <= float(line[10]) <= 110.0:
mydict.update({'Srad':float(line[10])})
if 0.0 <= float(line[3]) <= 55.0:
mydict.update({'Tmax':float(line[3])})
if -15.0 <= float(line[4]) <= 40.0:
mydict.update({'Tmin':float(line[4])})
if year >= 2003:
if -50.0 <= float(line[27]) <= 50.0:
mydict.update({'Tdew':float(line[27])})
else:
tavg = float(line[5]) #Avg daily temperature
havg = float(line[8]) #Avg relative humidity
#From https://cals.arizona.edu/azmet/dewpoint.html
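                    # This appears to be a Magnus-type approximation that derives
                    # the dew point from daily average temperature and relative
                    # humidity when no measured Tdew column is available.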
B = math.log(havg/100.0)+((17.27*tavg)/(237.3+tavg))
B = B/17.27
D = (237.3*B)/(1-B)
if -50.0 <= D <= 50.0:
mydict.update({'Tdew':D})
if 0.0 <= float(line[6]) <= 100.0:
mydict.update({'RHmax':float(line[6])})
if 0.0 <= float(line[7]) <= 100.0:
mydict.update({'RHmin':float(line[7])})
if 0.0 <= float(line[18]) <= 30.0:
mydict.update({'Wndsp':float(line[18])})
if 0.0 <= float(line[11]) <= 200.0:
mydict.update({'Rain':float(line[11])})
azmet.append(mydict)
azmet = pd.DataFrame(azmet)
nanrows = azmet.isna().any(1).to_numpy().nonzero()[0]
for item in nanrows:
mykey = azmet.loc[item,'Date']
print('Warning: Questionable weather data: ' + mykey)
if len(nanrows) > 0:
result = input('Continue (Y/N)?')
if result not in ['Y']:
return
#Process AZMET data from requested start to end date
startDate = datetime.datetime.strptime(start, '%Y-%j')
endDate = datetime.datetime.strptime(end, '%Y-%j')
tdelta = datetime.timedelta(days=1)
yesterday = today - tdelta
tcurrent = startDate
future = []
wthdata = []
needfuture = False
print('Processing AZMET weather data...')
while tcurrent <= endDate:
mykey = tcurrent.strftime('%Y-%j')
if tcurrent <= yesterday: #Get data for days prior to today
daydata = azmet.loc[azmet['Date'] == mykey]
daydata = daydata.to_dict('records')[0]
daydata.update({'MorP':'M'})
wthdata.append(daydata)
else: #Predict future data as average of past data on a day
needfuture = True
future[:] = []
for year in range(1987,int(tcurrent.strftime('%Y'))):
mon = int(tcurrent.strftime('%m')) #month
dom = int(tcurrent.strftime('%d')) #day of month
if mon==2 and dom==29: #leap day
if year%4: #not leapyear, use feb 28 data
feb28=tcurrent-tdelta
pastdate = feb28.replace(year=year)
else:
pastdate = tcurrent.replace(year=year)
pastkey = pastdate.strftime('%Y-%j')
daydata = azmet.loc[azmet['Date'] == pastkey]
daydata = daydata.to_dict('records')[0]
future.append(daydata)
futmean = | pd.DataFrame(future) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""
APIs to interact with Oracle's Model Deployment service.
There are three main classes: ModelDeployment, ModelDeploymentDetails, ModelDeployer.
One creates a ModelDeployment and deploys it under the umbrella of the ModelDeployer class. This way
multiple ModelDeployments can be unified with one ModelDeployer. The ModelDeployer class also serves
as the interface to all the deployments. ModelDeploymentDetails holds information about the particular
details of a particular deployment, such as how many instances, etc. In this way multiple, independent
ModelDeployments with the same details can be created using the ModelDeployer class.
Examples
--------
>>> from model_deploy.model_deployer import ModelDeployer, ModelDeploymentDetails
>>> deployer = ModelDeployer("model_dep_conf.yaml")
>>> deployment_properties = ModelDeploymentProperties(
... 'ocid1.datasciencemodel.ocn.reg.xxxxxxxxxxxxxxxxxxxxxxxxx')
... .with_prop('display_name', "My model display name")
... .with_prop("project_id", project_id)
... .with_prop("compartment_id", compartment_id)
... .with_instance_configuration(
... config={"INSTANCE_SHAPE":"VM.Standard2.1",
... "INSTANCE_COUNT":"1",
... 'bandwidth_mbps':10})
... .build()
>>> deployment_info = deployer.deploy(deployment_properties,
... max_wait_time=600, poll_interval=15)
>>> print(deployment_info.model_deployment_id)
>>> print(deployment_info.workflow_req_id)
>>> print(deployment_info.url)
>>> deployer.list_deployments() # Optionally pass in a status
"""
from typing import Union, Dict
import pandas as pd
import oci.pagination
from ads.common.auth import default_signer
from .common import utils
from .common.utils import OCIClientManager, State
from .model_deployment import ModelDeployment, DEFAULT_WAIT_TIME, DEFAULT_POLL_INTERVAL
from .model_deployment_properties import ModelDeploymentProperties
class ModelDeployer:
"""ModelDeployer is the class responsible for deploying the ModelDeployment
Attributes
----------
config : dict
ADS auth dictionary for OCI authentication.
ds_client : DataScienceClient
data science client
ds_composite_client : DataScienceCompositeClient
composite data science client
Methods
-------
deploy(model_deployment_details, **kwargs)
Deploy the model specified by `model_deployment_details`.
get_model_deployment(model_deployment_id:str)
Get the ModelDeployment specified by `model_deployment_id`.
get_model_deployment_state(model_deployment_id)
Get the state of the current deployment specified by id.
delete(model_deployment_id, **kwargs)
Remove the model deployment specified by the id or Model Deployment Object
list_deployments(status)
lists the model deployments associated with current compartment and data
science client
show_deployments(status)
shows the deployments filtered by `status` in a Dataframe
"""
def __init__(self, config: dict = None):
"""Initializes model deployer.
Parameters
----------
config : dict, optional
ADS auth dictionary for OCI authentication.
This can be generated by calling ads.common.auth.api_keys() or ads.common.auth.resource_principal().
If this is None, ads.common.default_signer(client_kwargs) will be used.
"""
if not config:
config = default_signer()
self.config = config
self.client_manager = OCIClientManager(config)
self.ds_client = self.client_manager.ds_client
self.ds_composite_client = self.client_manager.ds_composite_client
def deploy(
self,
properties: Union[ModelDeploymentProperties, Dict] = None,
wait_for_completion: bool = True,
max_wait_time: int = DEFAULT_WAIT_TIME,
poll_interval: int = DEFAULT_POLL_INTERVAL,
**kwargs,
) -> ModelDeployment:
"""Deploys a model.
Parameters
----------
properties : ModelDeploymentProperties or dict
Properties to deploy the model.
Properties can be None when kwargs are used for specifying properties.
wait_for_completion : bool
Flag set for whether to wait for deployment to complete before proceeding.
Optional, defaults to True.
max_wait_time : int
Maximum amount of time to wait in seconds. Optional, defaults to 1200.
Negative value implies infinite wait time.
poll_interval : int
Poll interval in seconds. Optional, defaults to 30.
kwargs :
Keyword arguments for initializing ModelDeploymentProperties.
See ModelDeploymentProperties() for details.
Returns
-------
ModelDeployment
A ModelDeployment instance.
"""
model_deployment = ModelDeployment(
properties,
config=self.config,
**kwargs,
)
return model_deployment.deploy(
wait_for_completion, max_wait_time, poll_interval
)
def deploy_from_model_uri(
self,
model_uri: str,
properties: Union[ModelDeploymentProperties, Dict] = None,
wait_for_completion: bool = True,
max_wait_time: int = DEFAULT_WAIT_TIME,
poll_interval: int = DEFAULT_POLL_INTERVAL,
**kwargs,
) -> ModelDeployment:
"""Deploys a model.
Parameters
----------
model_uri : str
uri to model files, can be local or in cloud storage
properties : ModelDeploymentProperties or dict
Properties to deploy the model.
Properties can be None when kwargs are used for specifying properties.
wait_for_completion : bool
Flag set for whether to wait for deployment to complete before proceeding.
Defaults to True
max_wait_time : int
Maximum amount of time to wait in seconds (Defaults to 1200).
Negative implies infinite wait time.
poll_interval : int
Poll interval in seconds (Defaults to 30).
kwargs :
Keyword arguments for initializing ModelDeploymentProperties
Returns
-------
ModelDeployment
A ModelDeployment instance
"""
if properties:
model_id = self.client_manager.prepare_artifact(
model_uri=model_uri, properties=properties
)
properties.model_deployment_configuration_details.model_configuration_details.model_id = (
model_id
)
else:
model_id = self.client_manager.prepare_artifact(
model_uri=model_uri, properties=kwargs
)
kwargs["model_id"] = model_id
return self.deploy(
properties,
wait_for_completion=wait_for_completion,
max_wait_time=max_wait_time,
poll_interval=poll_interval,
**kwargs,
)
def update(
self,
model_deployment_id: str,
properties: ModelDeploymentProperties = None,
wait_for_completion: bool = True,
max_wait_time: int = DEFAULT_WAIT_TIME,
poll_interval: int = DEFAULT_POLL_INTERVAL,
**kwargs,
) -> ModelDeployment:
"""Updates an existing model deployment.
Parameters
----------
model_deployment_id : str
Model deployment OCID.
properties : ModelDeploymentProperties
An instance of ModelDeploymentProperties or dict to initialize the ModelDeploymentProperties.
Defaults to None.
wait_for_completion : bool
Flag set for whether to wait for deployment to complete before proceeding.
Defaults to True.
max_wait_time : int
Maximum amount of time to wait in seconds (Defaults to 1200).
poll_interval : int
Poll interval in seconds (Defaults to 30).
kwargs :
Keyword arguments for initializing ModelDeploymentProperties.
Returns
-------
ModelDeployment
A ModelDeployment instance
"""
model_deployment = self.get_model_deployment(model_deployment_id)
# Deployment properties will be refreshed within model_deployment.update() when update is done.
return model_deployment.update(
properties,
wait_for_completion,
max_wait_time=max_wait_time,
poll_interval=poll_interval,
**kwargs,
)
def get_model_deployment(self, model_deployment_id: str) -> ModelDeployment:
"""Gets a ModelDeployment by OCID.
Parameters
----------
model_deployment_id : str
Model deployment OCID
Returns
-------
ModelDeployment
A ModelDeployment instance
"""
try:
oci_model_deployment_object = self.ds_client.get_model_deployment(
model_deployment_id
).data
model_deployment_object = ModelDeployment(
oci_model_deployment_object,
config=self.config,
)
return model_deployment_object
except Exception as e:
utils.get_logger().error(
"Getting model deployment failed with error: %s", e
)
raise e
def get_model_deployment_state(self, model_deployment_id: str) -> State:
"""Gets the state of a deployment specified by OCID
Parameters
----------
model_deployment_id : str
Model deployment OCID
Returns
-------
str
The state of the deployment
"""
model_deployment = self.get_model_deployment(model_deployment_id)
return model_deployment.state
def delete(
self,
model_deployment_id,
wait_for_completion: bool = True,
max_wait_time: int = DEFAULT_WAIT_TIME,
poll_interval: int = DEFAULT_POLL_INTERVAL,
) -> ModelDeployment:
"""Deletes the model deployment specified by OCID.
Parameters
----------
model_deployment_id : str
Model deployment OCID.
wait_for_completion : bool
Wait for deletion to complete. Defaults to True.
max_wait_time : int
Maximum amount of time to wait in seconds (Defaults to 600).
Negative implies infinite wait time.
poll_interval : int
Poll interval in seconds (Defaults to 60).
Returns
-------
A ModelDeployment instance that was deleted
"""
try:
model_deployment_object = self.get_model_deployment(model_deployment_id)
return model_deployment_object.delete(
wait_for_completion, max_wait_time, poll_interval
)
except Exception as e:
utils.get_logger().error(
"Deleting model deployment failed with error: %s", format(e)
)
raise e
def list_deployments(self, status=None, compartment_id=None, **kwargs) -> list:
"""Lists the model deployments associated with current compartment and data science client
Parameters
----------
status : str
Status of deployment. Defaults to None.
compartment_id : str
Target compartment to list deployments from.
Defaults to the compartment set in the environment variable "NB_SESSION_COMPARTMENT_OCID".
If "NB_SESSION_COMPARTMENT_OCID" is not set, the root compartment ID will be used.
            A ValueError will be raised if the root compartment ID cannot be determined.
kwargs :
The values are passed to oci.data_science.DataScienceClient.list_model_deployments.
Returns
-------
list
A list of ModelDeployment objects.
Raises
------
ValueError
If compartment_id is not specified and cannot be determined from the environment.
"""
if not compartment_id:
compartment_id = self.client_manager.default_compartment_id()
if not compartment_id:
raise ValueError(
"Unable to determine compartment ID from environment. Specify compartment_id."
)
if isinstance(status, State):
status = status.name
if status is not None:
kwargs["lifecycle_state"] = status
# https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/api/pagination.html#module-oci.pagination
deployments = oci.pagination.list_call_get_all_results(
self.ds_client.list_model_deployments, compartment_id, **kwargs
).data
return [
ModelDeployment(deployment, config=self.config)
for deployment in deployments
]
def show_deployments(
self,
status=None,
compartment_id=None,
) -> pd.DataFrame:
"""Returns the model deployments associated with current compartment and data science client
as a Dataframe that can be easily visualized
Parameters
----------
status : str
Status of deployment. Defaults to None.
compartment_id : str
Target compartment to list deployments from.
Defaults to the compartment set in the environment variable "NB_SESSION_COMPARTMENT_OCID".
If "NB_SESSION_COMPARTMENT_OCID" is not set, the root compartment ID will be used.
            A ValueError will be raised if the root compartment ID cannot be determined.
Returns
-------
DataFrame
pandas Dataframe containing information about the ModelDeployments
Raises
------
ValueError
If compartment_id is not specified and cannot be determined from the environment.
"""
if not compartment_id:
compartment_id = self.client_manager.default_compartment_id()
if not compartment_id:
raise ValueError(
"Unable to determine compartment ID from environment. Specify compartment_id."
)
        if isinstance(status, str) or status is None:
status = State._from_str(status)
model_deployments = self.ds_client.list_model_deployments(compartment_id).data
display = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock
from copy import deepcopy
import pandas
from .utils import load_data
from tests.utils.df_handler import transform_df
def set_list_tables_mock(client):
list_tables_response = load_data("redshift-data-list-tables-response.json")
list_tables_mock = MagicMock(return_value=list_tables_response)
client.set_mock("ListTables", list_tables_mock)
return list_tables_mock
def set_execute_statement_mock(client, check_kwargs=None):
    # to pass params to the describe_statement mock
info_for_statements = {}
execute_statement_response_base = load_data(
"redshift-data-execute-statement-response-base.json"
)
execute_statement_mock = MagicMock()
def execute_statement_side_effect(*args, **kwargs):
cluster_identifier = kwargs["ClusterIdentifier"]
database = kwargs["Database"]
sql = kwargs["Sql"]
if check_kwargs:
check_kwargs(kwargs)
response = deepcopy(execute_statement_response_base)
response["ClusterIdentifier"] = cluster_identifier
response["Database"] = database
response["Id"] = "{}{:0=2}".format(
response["Id"], execute_statement_mock.call_count
)
info_for_statement = info_for_statements.setdefault(response["Id"], {})
info_for_statement["ClusterIdentifier"] = cluster_identifier
info_for_statement["Database"] = database
info_for_statement["Sql"] = sql
return response
execute_statement_mock.side_effect = execute_statement_side_effect
client.set_mock("ExecuteStatement", execute_statement_mock)
return info_for_statements, execute_statement_mock
def set_describe_statement_mock(client, info_for_statements, **response_diff):
describe_statement_response_base = load_data(
"redshift-data-describe-statement-response-base.json"
)
describe_statement_mock = MagicMock()
def describe_statement_side_effect(*args, **kwargs):
statement_id = kwargs["Id"]
info_for_statement = info_for_statements[statement_id]
sql = info_for_statement["Sql"]
cluster_identifier = info_for_statement["ClusterIdentifier"]
response = deepcopy(describe_statement_response_base)
response["Id"] = statement_id
response["ClusterIdentifier"] = cluster_identifier
response["QueryString"] = sql
response.update(response_diff)
return response
describe_statement_mock.side_effect = describe_statement_side_effect
client.set_mock("DescribeStatement", describe_statement_mock)
return describe_statement_mock
def test_to_redshift_w_no_secret_arn_and_no_db_user_should_fail(
writer_under_test,
):
from pandas_amazon_redshift.errors import InvalidAuthentication
with pytest.raises(InvalidAuthentication):
writer_under_test(
pandas.DataFrame([[1]], columns=["col"]),
table="table",
dtype={"col": "INTEGER"},
)
def test_read_redshift_w_no_secret_arn_and_no_db_user_should_fail(
reader_under_test,
):
from pandas_amazon_redshift.errors import InvalidAuthentication
with pytest.raises(InvalidAuthentication):
reader_under_test("SELECT 1")
@pytest.fixture()
def mock_boto3_client_for_reader(mock_boto3_client):
client = mock_boto3_client("redshift-data")
info_for_statements, _ = set_execute_statement_mock(client)
set_describe_statement_mock(client, info_for_statements)
return client
@pytest.mark.parametrize(
"config_path",
[
"config-read-redshift-dtype-emptytbl.json",
"config-read-redshift-dtype-1.json",
"config-read-redshift-dtype-booltbl.json",
"config-read-redshift-dtype-floattbl.json",
"config-read-redshift-dtype-inttbl.json",
"config-read-redshift-dtype-texttbl.json",
"config-read-redshift-dtype-misctbl.json",
"config-read-redshift-dtype-numerictbl.json",
"config-read-redshift-dtype-datetimetbl.json",
"config-read-redshift-dtype-supertbl.json",
"config-read-redshift-next-token.json",
],
)
def test_read_redshift_success(
reader_under_test, mock_boto3_client_for_reader, config_path
):
config = load_data(config_path)
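    # The GetStatementResult mock pages its response by NextToken; the first
    # call (no NextToken) is served the "default" page from the test config.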
def get_statement_result_side_effect(*args, **kwargs):
response = config["GetStatementResultResponse"]
next_token = kwargs.get("NextToken", "default")
return response[next_token]
get_statement_result_mock = MagicMock(
side_effect=get_statement_result_side_effect
)
mock_boto3_client_for_reader.set_mock(
"GetStatementResult", get_statement_result_mock
)
expected_df_config = config["ExpectedDf"]
expected_df = pandas.DataFrame(expected_df_config["Data"])
expected_df = transform_df(expected_df, expected_df_config)
actual_df = reader_under_test(config["Sql"], db_user="testuser")
actual_df_config = config.get("ActualDf", {})
actual_df = transform_df(actual_df, actual_df_config)
pandas.testing.assert_frame_equal(actual_df, expected_df, check_exact=True)
@pytest.mark.parametrize(
"mock_args, sql, error_cls, error_msg",
[
(
{
"Status": "FAILED",
"Error": 'ERROR: relation "not_existing_tbl" does not exist',
},
"SELECT col FROM not_existing_tbl",
"QueryFailedError",
r"The following query was failed "
r"\[ID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX01 \(sql: '{}'\)\]\n"
r"\({}\)",
),
(
{
"Status": "ABORTED",
},
"SELECT 1",
"QueryAbortedError",
r"The following query was stopped by user "
r"\[ID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX01 \(sql: '{}'\)\]",
),
],
)
def test_read_redshift_fail(
reader_under_test,
mock_boto3_client,
mock_args,
sql,
error_cls,
error_msg,
):
from pandas_amazon_redshift.errors import QueryFailedError # noqa
from pandas_amazon_redshift.errors import QueryAbortedError # noqa
client = mock_boto3_client("redshift-data")
info_for_statements, _ = set_execute_statement_mock(client)
set_describe_statement_mock(client, info_for_statements, **mock_args)
format_args = [sql]
if "Error" in mock_args:
format_args.append(mock_args["Error"])
error_msg = error_msg.format(*format_args)
with pytest.raises(locals()[error_cls], match=error_msg):
reader_under_test(
sql,
secret_arn="arn:aws:secretsmanager:us-east-1:"
"012345678901:secret:TestSecret-ZZZZZZ",
)
@pytest.fixture()
def mock_for_writer(mock_boto3_client):
client = mock_boto3_client("redshift-data")
set_list_tables_mock(client)
info_for_statements, execute_statement_mock = set_execute_statement_mock(
client
)
set_describe_statement_mock(client, info_for_statements)
return execute_statement_mock
def test_to_redshift_if_exists_fail(writer_under_test, mock_for_writer):
from pandas_amazon_redshift.errors import TableCreationError
expected_error_msg = (
"Could not create the table "
"existing_tbl in the schema public because it already exists."
)
with pytest.raises(TableCreationError, match=expected_error_msg):
writer_under_test(
pandas.DataFrame([[1]], columns=["col"]),
table="existing_tbl",
dtype={"col": "INTEGER"},
db_user="testuser",
)
def test_to_redshift_success(
writer_under_test,
mock_for_writer,
cluster_identifier,
database,
):
from pandas_amazon_redshift.types import Integer
writer_under_test(
| pandas.DataFrame([[1, 1]]) | pandas.DataFrame |
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import datetime
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data_df=pd.read_csv('/kaggle/input/hotel-booking-demand/hotel_bookings.csv')
data_df.info()
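# Build an arrival-date timestamp by concatenating year, month name and day of
# month, then parsing the combined string with the matching '%Y%B%d' format.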
data_df['time_concat']=pd.to_datetime(data_df['arrival_date_year'].astype(int).astype(str) + data_df['arrival_date_month'] + data_df['arrival_date_day_of_month'].astype(int).astype(str),format='%Y%B%d')
data_df['time_sub']=data_df['time_concat']- | pd.to_datetime(data_df['reservation_status_date']) | pandas.to_datetime |
# overlap coefficient join
from joblib import delayed, Parallel
from six import iteritems
import pandas as pd
import pyprind
from py_stringsimjoin.filter.overlap_filter import OverlapFilter
from py_stringsimjoin.index.inverted_index import InvertedIndex
from py_stringsimjoin.utils.generic_helper import convert_dataframe_to_array, \
find_output_attribute_indices, get_attrs_to_project, \
get_num_processes_to_launch, get_output_header_from_tables, \
get_output_row_from_tables, remove_redundant_attrs, split_table, COMP_OP_MAP
from py_stringsimjoin.utils.missing_value_handler import \
get_pairs_with_missing_value
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_comp_op_for_sim_measure, validate_key_attr, \
validate_input_table, validate_threshold, validate_tokenizer, \
validate_output_attrs
def overlap_coefficient_join_py(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op='>=',
allow_empty=True, allow_missing=False,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
out_sim_score=True, n_jobs=1, show_progress=True):
"""Join two tables using overlap coefficient.
For two sets X and Y, the overlap coefficient between them is given by:
:math:`overlap\\_coefficient(X, Y) = \\frac{|X \\cap Y|}{\\min(|X|, |Y|)}`
In the case where one of X and Y is an empty set and the other is a
non-empty set, we define their overlap coefficient to be 0. In the case
where both X and Y are empty sets, we define their overlap coefficient to
be 1.
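    As an illustrative example, if X = {'aa', 'bb', 'cc'} and Y = {'bb', 'cc',
    'dd'}, the intersection contains 2 tokens and min(|X|, |Y|) = 3, so the
    overlap coefficient is 2 / 3 (about 0.67).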
Finds tuple pairs from left table and right table such that the overlap
coefficient between the join attributes satisfies the condition on input
threshold. For example, if the comparison operator is '>=', finds tuple
pairs whose overlap coefficient between the strings that are the values of
the join attributes is greater than or equal to the input threshold, as
specified in "threshold".
Args:
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_join_attr (string): join attribute in left table.
r_join_attr (string): join attribute in right table.
tokenizer (Tokenizer): tokenizer to be used to tokenize join
attributes.
threshold (float): overlap coefficient threshold to be satisfied.
comp_op (string): comparison operator. Supported values are '>=', '>'
and '=' (defaults to '>=').
allow_empty (boolean): flag to indicate whether tuple pairs with empty
set of tokens in both the join attributes should be included in the
output (defaults to True).
allow_missing (boolean): flag to indicate whether tuple pairs with
missing value in at least one of the join attributes should be
included in the output (defaults to False). If this flag is set to
True, a tuple in ltable with missing value in the join attribute
will be matched with every tuple in rtable and vice versa.
l_out_attrs (list): list of attribute names from the left table to be
included in the output table (defaults to None).
r_out_attrs (list): list of attribute names from the right table to be
included in the output table (defaults to None).
l_out_prefix (string): prefix to be used for the attribute names coming
from the left table, in the output table (defaults to 'l\_').
r_out_prefix (string): prefix to be used for the attribute names coming
from the right table, in the output table (defaults to 'r\_').
out_sim_score (boolean): flag to indicate whether similarity score
should be included in the output table (defaults to True). Setting
this flag to True will add a column named '_sim_score' in the
output table. This column will contain the similarity scores for the
tuple pairs in the output.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used
(where n_cpus is the total number of CPUs in the machine). Thus for
n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs)
becomes less than 1, then no parallel computing code will be used
(i.e., equivalent to the default).
show_progress (boolean): flag to indicate whether task progress should
be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs that satisfy the join
condition (DataFrame).
"""
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes and join attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_join_attr, ltable.columns,
'join attribute', 'left table')
validate_attr(r_join_attr, rtable.columns,
'join attribute', 'right table')
# check if the join attributes are not of numeric type
validate_attr_type(l_join_attr, ltable[l_join_attr].dtype,
'join attribute', 'left table')
validate_attr_type(r_join_attr, rtable[r_join_attr].dtype,
'join attribute', 'right table')
# check if the input tokenizer is valid
validate_tokenizer(tokenizer)
# check if the input threshold is valid
validate_threshold(threshold, 'OVERLAP_COEFFICIENT')
# check if the comparison operator is valid
validate_comp_op_for_sim_measure(comp_op, 'OVERLAP_COEFFICIENT')
# check if the output attributes exist
validate_output_attrs(l_out_attrs, ltable.columns,
r_out_attrs, rtable.columns)
# check if the key attributes are unique and do not contain missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# set return_set flag of tokenizer to be True, in case it is set to False
revert_tokenizer_return_set_flag = False
if not tokenizer.get_return_set():
tokenizer.set_return_set(True)
revert_tokenizer_return_set_flag = True
# remove redundant attrs from output attrs.
l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr)
r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr)
# get attributes to project.
l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_join_attr)
r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_join_attr)
# Do a projection on the input dataframes to keep only the required
# attributes. Then, remove rows with missing value in join attribute from
# the input dataframes. Then, convert the resulting dataframes into ndarray.
ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs, l_join_attr)
rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs, r_join_attr)
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = _overlap_coefficient_join_split(
ltable_array, rtable_array,
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
else:
# if n_jobs is above 1, split the right table into n_jobs splits and
# join each right table split with the whole of left table in a separate
# process.
r_splits = split_table(rtable_array, n_jobs)
results = Parallel(n_jobs=n_jobs)(
delayed(_overlap_coefficient_join_split)(
ltable_array, r_splits[job_index],
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
# If allow_missing flag is set, then compute all pairs with missing value in
# at least one of the join attributes and then add it to the output
# obtained from the join.
if allow_missing:
missing_pairs = get_pairs_with_missing_value(
ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
output_table = pd.concat([output_table, missing_pairs])
# add an id column named '_id' to the output table.
output_table.insert(0, '_id', range(0, len(output_table)))
# revert the return_set flag of tokenizer, in case it was modified.
if revert_tokenizer_return_set_flag:
tokenizer.set_return_set(False)
return output_table
def _overlap_coefficient_join_split(ltable_list, rtable_list,
l_columns, r_columns,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress):
"""Perform overlap coefficient join for a split of ltable and rtable"""
# find column indices of key attr, join attr and output attrs in ltable
l_key_attr_index = l_columns.index(l_key_attr)
l_join_attr_index = l_columns.index(l_join_attr)
l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, join attr and output attrs in rtable
r_key_attr_index = r_columns.index(r_key_attr)
r_join_attr_index = r_columns.index(r_join_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# Build inverted index over ltable
inverted_index = InvertedIndex(ltable_list, l_join_attr_index,
tokenizer, cache_size_flag=True)
# While building the index, we cache the record ids with empty set of
# tokens. This is needed to handle the allow_empty flag.
cached_data = inverted_index.build(allow_empty)
l_empty_records = cached_data['empty_records']
overlap_filter = OverlapFilter(tokenizer, 1)
comp_fn = COMP_OP_MAP[comp_op]
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
prog_bar = pyprind.ProgBar(len(rtable_list))
for r_row in rtable_list:
r_string = r_row[r_join_attr_index]
r_join_attr_tokens = tokenizer.tokenize(r_string)
r_num_tokens = len(r_join_attr_tokens)
# If allow_empty flag is set and the current rtable record has empty set
# of tokens in the join attribute, then generate output pairs joining
# the current rtable record with those records in ltable with empty set
# of tokens in the join attribute. These ltable record ids are cached in
# l_empty_records list which was constructed when building the inverted
# index.
if allow_empty and r_num_tokens == 0:
for l_id in l_empty_records:
if has_output_attributes:
output_row = get_output_row_from_tables(
ltable_list[l_id], r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices,
r_out_attrs_indices)
else:
output_row = [ltable_list[l_id][l_key_attr_index],
r_row[r_key_attr_index]]
if out_sim_score:
output_row.append(1.0)
output_rows.append(output_row)
continue
# probe inverted index and find overlap of candidates
candidate_overlap = overlap_filter.find_candidates(
r_join_attr_tokens, inverted_index)
for cand, overlap in iteritems(candidate_overlap):
# compute the actual similarity score
sim_score = (float(overlap) /
float(min(r_num_tokens,
inverted_index.size_cache[cand])))
if comp_fn(sim_score, threshold):
if has_output_attributes:
output_row = get_output_row_from_tables(
ltable_list[cand], r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices, r_out_attrs_indices)
else:
output_row = [ltable_list[cand][l_key_attr_index],
r_row[r_key_attr_index]]
# if out_sim_score flag is set, append the overlap coefficient
# score to the output record.
if out_sim_score:
output_row.append(sim_score)
output_rows.append(output_row)
if show_progress:
prog_bar.update()
output_header = get_output_header_from_tables(l_key_attr, r_key_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix)
if out_sim_score:
output_header.append("_sim_score")
output_table = | pd.DataFrame(output_rows, columns=output_header) | pandas.DataFrame |
#Rule 9 - PROCESS_ID should be alphanumeric and PROCESS_AGENT_ID should be a number
def process_id(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from dateutil.parser import parse
import validators
file_name="Process_ID.py"
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Process_ID"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
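        # Note: the validate_* helpers below return True when the value is
        # INVALID (the regex does not match), so a True result appends an
        # error entry to data.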
def validate_process_id(string):
if(re.match("^[a-zA-Z0-9-_]+$",string)):
return False
else:
return True
def validate_process_agent_id(string):
if(re.match("^[-+]?[0-9]+$",string)):
return False
else:
return True
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
for index,row in df.iterrows():
process_id=row['PROCESS_ID']
process_agent_id=row['PROCESS_AGENT_ID']
if(pd.notnull(row['PROCESS_ID'])):
if(validate_process_id(process_id)):
                    entry=[index,fleName,'PROCESS_ID has space or any character other than alphanumeric']
data.append(entry)
if(pd.notnull(row['PROCESS_AGENT_ID'])):
if(validate_process_agent_id(str(process_agent_id))):
entry=[index,fleName,'PROCESS_AGENT_ID has any character other than numeric']
data.append(entry)
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if(ExcelFile(target).sheet_names[0] == 'Sheet1'):
with ExcelWriter(target, engine='openpyxl', mode='w') as writer:
df1.to_excel(writer,sheet_name=rule,index=False)
else:
with | ExcelWriter(target, engine='openpyxl', mode='a') | pandas.ExcelWriter |