prompt | completion | api
---|---|---|
stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90
import numpy as np
import pandas as pd
from cognite.model_hosting.data_fetcher import DataFetcher
class Model:
"""
You need to have a class called Model in a file called model.py at the
top level of your package.
It should contain:
- A static train method
Which performs training and persists any state you need for
prediction. This can be serialized models, csv files, or something else.
You just have to be able to store it in files.
- A static load method
Which loads your persisted state and returns an instance of the Model
class that is ready for predictions.
- A predict method
Which uses the persisted state to do predictions.
"""
@staticmethod
def train(open_artifact, data_spec):
"""
open_artifact:
The train method must accept an open_artifact argument. This is a function
that works the same way as the builtin open(), except it reads from
and writes to the root of a special storage location in the cloud
that belongs to the current model version.
data_spec:
An argument we pass in ourselves when we initiate the training.
api_key, project:
Optional arguments that are passed in automatically from Model
Hosting if you specify them.
"""
data_fetcher = DataFetcher(data_spec)
data_fetcher.files.fetch("data")
data_fetcher.files.fetch("target")
X = pd.read_csv("data")
y = pd.read_csv("target")
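    # A minimal illustrative sketch of the load/predict half of the contract
    # described in the class docstring. The "model.pkl" artifact name, the
    # pickle-based persistence, and the shape of `instances` are assumptions,
    # not the original implementation.
    def __init__(self, regressor):
        self.regressor = regressor

    @staticmethod
    def load(open_artifact):
        # Load the state persisted by train() and return a ready Model instance.
        import pickle
        with open_artifact("model.pkl", "rb") as f:
            regressor = pickle.load(f)
        return Model(regressor)

    def predict(self, instances):
        # Use the persisted state to produce predictions for a list of instances.
        return self.regressor.predict(instances).tolist()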
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 17:29:16 2018
@author: jdkern
"""
from __future__ import division
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def exchange(year):
df_data = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Load_Path_Sim.csv',header=0)
c = ['Path66_sim','Path46_sim','Path61_sim','Path42_sim','Path24_sim','Path45_sim']
df_data = df_data[c]
paths = ['Path66','Path46','Path61','Path42','Path24','Path45']
df_data.columns = paths
df_data = df_data.loc[year*365:year*365+364,:]
# select dispatchable imports (positive flow days)
imports = df_data
imports = imports.reset_index()
for p in paths:
for i in range(0,len(imports)):
if p == 'Path42':
if imports.loc[i,p] >= 0:
imports.loc[i,p] = 0
else:
imports.loc[i,p] = -imports.loc[i,p]
elif p == 'Path46':
if imports.loc[i,p] < 0:
imports.loc[i,p] = 0
else:
imports.loc[i,p] = imports.loc[i,p]*.404 + 424
else:
if imports.loc[i,p] < 0:
imports.loc[i,p] = 0
imports.rename(columns={'Path46':'Path46_SCE'}, inplace=True)
imports.to_csv('Path_setup/CA_imports.csv')
# convert to minimum flow time series and dispatchable (daily)
df_mins = pd.read_excel('Path_setup/CA_imports_minflow_profiles.xlsx',header=0)
lines = ['Path66','Path46_SCE','Path61','Path42']
for i in range(0,len(df_data)):
for L in lines:
if df_mins.loc[i,L] >= imports.loc[i,L]:
df_mins.loc[i,L] = imports.loc[i,L]
imports.loc[i,L] = 0
else:
imports.loc[i,L] = np.max((0,imports.loc[i,L]-df_mins.loc[i,L]))
dispatchable_imports = imports*24
dispatchable_imports.to_csv('Path_setup/CA_dispatchable_imports.csv')
df_data = pd.read_csv('Path_setup/CA_imports.csv',header=0)
# hourly minimum flow for paths
hourly = np.zeros((8760,len(lines)))
for i in range(0,365):
for L in lines:
index = lines.index(L)
hourly[i*24:i*24+24,index] = np.min((df_mins.loc[i,L], df_data.loc[i,L]))
H = pd.DataFrame(hourly)
H.columns = ['Path66','Path46_SCE','Path61','Path42']
H.to_csv('Path_setup/CA_path_mins.csv')
# hourly exports
df_data = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Load_Path_Sim.csv',header=0)
import logging
import os
import pandas as pd
import threading
import time
import sys
from threading import Event
from src.configuration.db import MySQLEngine
from src.stream.utils import utils
from src.stream.exceptions.exceptions import GeneralSyncException
class ProdDBArgs:
"""
DESCRIPTION
-----------
A class used to define static information for the set of databases including
ip addresses and remote login arguments.
MODIFICATIONS
-------------
Created : 5/6/19
"""
ips = {
"LOCAL": {
"internal":"127.0.0.1",
"public":"127.0.0.1",
"remote_port":3306
},
"DEV_1": {
"internal":"10.142.0.2",
"public":"172.16.17.32",
"remote_port":3307
},
"DEV_2": {
"internal":"10.142.0.3",
"public":"192.168.3.11",
"remote_port":3308
},
"DEV_3": {
"internal":"10.142.0.5",
"public":"192.168.3.11",
"remote_port":3309
},
"DEV_4": {
"internal":"10.142.0.6",
"public":"192.168.3.11",
"remote_port":3310
},
"LIVE_1": {
"internal":"10.142.0.4",
"public":"192.168.3.11",
"remote_port":3311
}
}
args = {
"user": "remote",
"password": "<PASSWORD>",
"database": "forex_etnrl",
"auth_plugin":"mysql_native_password",
"use_unicode": True,
"autocommit": True,
"reconnect_retry_count":3,
"reconnect_retry_period":0.1,
"pool_size":5,
"pool_name":"mypool"
}
# "host": "127.0.0.1",
# "port": 3306,
class DB(object):
"""
DESCRIPTION
-----------
A class containing functions to connect, query, and update MySQL database
instances.
PARAMETERS
----------
serv_name : str
A string value representing the name of the server the database exists on.
sync_tbls_lst : list
A list containing the names of the tables which will be evaluated and synced.
log_instance : logging.getLogger
A logging instance used to log errors, exceptions, and other information
pertaining to the database instance.
master_db : bool (default=False)
A boolean flag representing whether the DB instance is the master database.
use_internal : bool (default=False)
A boolean flag representing whether to use internal IP addresses (in case
where all servers can be accessed within the same network)
remote_sync : bool (default=False)
A boolean flag representing whether the database is being accessed via
SSH tunnel from a remote location.
update_method : str (default="PK")
A string value representing the method for which to track current and last
saved locations for database tables. Currently supported options include
PK (primary key) and TS (timestamp).
overwrite_slave : bool (default=False)
A boolean flag representing whether to overwrite any modifications made to
a slave database from the last save point by syncing from the master.
NOTES
-----
With the given parameter value `overwrite_slave` as True, the class
will prepare the database table by deleting any modifications made since the
previous sync was made, barring that the database is the master db.
MODIFICATIONS
-------------
Created : 5/7/19
"""
def __init__(self, serv_name, sync_tbls_lst, log_instance, master_db=False,
use_internal=False, remote_sync=False, update_method="PK",
overwrite_slave=False):
self.serv_name = serv_name
self.sync_tbls_lst = sync_tbls_lst
self.sync_log = log_instance
self.master_db = master_db
self.remote_sync = remote_sync
self.update_method = update_method
self.overwrite_slave = overwrite_slave
if use_internal:
self.db_ip_add = ProdDBArgs.ips[serv_name]["internal"]
else:
self.db_ip_add = ProdDBArgs.ips[serv_name]["public"]
self.initialize_db_instance()
def initialize_db_instance(self):
"""
DESCRIPTION
-----------
A wrapper function for initializing the DB instance. Verify the table
sync list, create the engine instance, obtain the last saved update from
the previous sync, obtain the current update points for all tables,
determine if the table has been modified, make appropriate additions or
deletions to tables to prepare for syncing from master to slave.
MODIFICATIONS
-------------
Created : 5/7/19
"""
self.create_default_sync_table_list()
self.verify_sync_tbls_lst()
self.create_engine()
self.get_comp_field_name()
def update_db_table_positions(self):
self.get_last_saved_update_df()
self.create_current_update_df()
self.are_tbls_modified = self.eval_if_tbls_have_been_modified()
self.initial_sync_actions()
def create_engine(self):
"""
DESCRIPTION
-----------
Create an engine instance; a pool of connections to the database described
by the provided parameters and default arguments provided in the ProdDBArgs
class instance.
NOTES
-----
Assigns the engine as an attribute to the class.
MODIFICATIONS
-------------
Created : 5/7/19
"""
args = ProdDBArgs.args
if self.remote_sync is True:
args["host"] = "127.0.0.1"
args["port"] = ProdDBArgs.ips[self.serv_name]["remote_port"]
else:
args["port"] = 3306
if self.master_db is True:
args["host"] = "127.0.0.1"
else:
args["host"] = self.db_ip_add
self.engine = MySQLEngine(**args)
def insert(self, tbl_name, tbl_df):
"""
DESCRIPTION
-----------
Insert pandas dataframe object into DB
PARAMETERS
----------
table_name : str
The table name given in the db as the destination for inserted data
table_df : pd.DataFrame object
The dataframe from which to convert to SQL
RETURNS
-------
first_insert_id : int
The first primary key id value inserted into the DB, used for marking
appropriate id's back into the dataframe object
MODIFICATIONS
-------------
Created : 1/29/19
Modified : 4/18/19
- Adapted for base streaming class from base strategy class.
"""
with self.engine.connection(commit=False) as con:
first_insert_id = con.insert(tbl_name, tbl_df)
return first_insert_id
def select(self, query):
"""
DESCRIPTION
-----------
Return a pandas dataframe object using a select type query on the DB
PARAMETERS
----------
query : str
A query string to pass to the DB to return selected rows
RETURNS
-------
df : pandas.DataFrame object
Input the returned sql data into a pandas dataframe object
MODIFICATIONS
-------------
Created : 1/29/19
Modified : 4/18/19
- Adapted for base streaming class from base strategy class.
"""
with self.engine.connection(commit=False) as con:
df = con.select(query)
return df
def update(self, query):
"""
DESCRIPTION
-----------
Given a query string, update existing rows in the DB according to the query
PARAMETERS
----------
query : str
Formatted SQL update query as a string object
MODIFICATIONS
-------------
Created : 1/29/19
Modified : 4/18/19
- Adapted for base streaming class from base strategy class.
"""
with self.engine.connection(commit=False) as con:
con.update(query)
def write_df_in_chunks(self, df, tbl_name, chunksize=10000):
"""
DESCRIPTION
-----------
Intended for use with large dataframes, write to database in chunks of a
specified row length (chunksize).
PARAMETERS
----------
df : pd.DataFrame
A pandas dataframe to write to the database.
tbl_name : str
A string representing the name of the table for which to write in
the database.
chunksize : int (default = 10000)
An integer value representing the number of rows to write in one
insert query
MODIFICATIONS
-------------
Created : 4/25/19
Modified : 5/7/19
- Modified from the base streaming class for syncing databases with
intention to transfer large amounts of data or entire tables if
necessary.
"""
for i in range(0, len(df), chunksize):
tmp = df.iloc[i:i+chunksize]
self.insert(tbl_name, tmp)
def create_default_sync_table_list(self):
"""
DESCRIPTION
-----------
Assign default table names and their respective primary key fields in a
reference dictionary to be accessed and cross-checked when table sync
requests are made.
NOTES
-----
Assigns the default sync tbls dict to the class.
MODIFICATIONS
-------------
Created : 5/6/19
"""
self.default_sync_tbls_dict = {
"CurrencyPair1MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair5MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair15MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair30MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair1HourExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair4HourExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair1DayExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair1WeekExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyPair1MonthExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair1MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair5MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair15MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair30MinExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair1HourExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair4HourExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair1DayExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair1WeekExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"AltCurrencyPair1MonthExchangeRateAggregate": "currencyPairTimePeriodExchangeRateId",
"CurrencyDailyRanking": "currencyRankingId",
"CurrencyWeeklyRanking": "currencyRankingId",
"CurrencyMonthlyRanking": "currencyRankingId",
"AltCurrencyDailyRanking": "currencyRankingId",
"AltCurrencyWeeklyRanking": "currencyRankingId",
"AltCurrencyMonthlyRanking": "currencyRankingId",
"CurrencyPairDailyExchangeRateLevel": "levelId",
"CurrencyPairWeeklyExchangeRateLevel": "levelId",
"CurrencyPairMonthlyExchangeRateLevel": "levelId",
"AltCurrencyPairDailyExchangeRateLevel": "levelId",
"AltCurrencyPairWeeklyExchangeRateLevel": "levelId",
"AltCurrencyPairMonthlyExchangeRateLevel": "levelId",
"CurrencyPairExchangeRate": "currencyPairExchangeRateId",
"AltCurrencyPairExchangeRate": "currencyPairExchangeRateId",
"TimePeriodDimension": "timePeriodDimensionId"
}
def verify_sync_tbls_lst(self):
"""
DESCRIPTION
-----------
Compare the user input tables list to the default server tables list to
verify that all user entries are valid database table names.
RAISES
------
NameError :
In the case that the server name provided does not match any of the
default table names, raise this error to ensure the user input is
corrected.
TypeError :
In the case that neither a list or the default None argument is passed
by the user, raise this error to inform that the input type provided
is not valid.
MODIFICATIONS
-------------
Created : 5/6/19
"""
if self.sync_tbls_lst is None:
self.sync_tbls_dict = self.default_sync_tbls_dict
elif isinstance(self.sync_tbls_lst, list):
self.sync_tbls_dict = {}
for tbl_name in self.sync_tbls_lst:
if tbl_name not in self.default_sync_tbls_dict:
raise NameError(f"{tbl_name} is not a supported table name.")
else:
self.sync_tbls_dict[tbl_name] = self.default_sync_tbls_dict[tbl_name]
else:
raise TypeError("sync tables list must be None or of type `list`.")
def get_last_saved_update_df(self):
"""
DESCRIPTION
-----------
A wrapper function intended to obtain the last saved updates for each table
according to the provided update method.
NOTES
-----
Assigns the last update dataframe as an attribute to the class.
RAISES
------
ValueError :
Raise a value error if the update method selected is neither of the
supported methods (Primary Key, Last updated timestamp)
MODIFICATIONS
-------------
Created : 5/7/19
"""
if self.update_method == "PK":
self.create_pk_csv_locations()
self.last_update_df = self.read_last_saved_csv(
self.pk_csv_fp,
self.pk_csv_backup_fp
)
elif self.update_method == "TS":
self.create_ts_csv_locations()
self.last_update_df = self.read_last_saved_csv(
self.ts_csv_fp,
self.ts_csv_backup_fp
)
else:
raise ValueError(f"{self.update_method} is not a supported update method.")
def get_last_update_ts(self, tbl_name):
"""
DESCRIPTION
-----------
Obtain the greatest timestamp available for data in a database table.
This is completed by joining the table with the TimePeriodDimension table
in all except the CurrencyPairExchangeRate and AltCurrencyPairExchangeRate
tables, which have their own independent timestamp fields.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the table in the database.
RETURNS
-------
A pandas dataframe instance containing the resulting maximum timestamp
value available in the database table.
MODIFICATIONS
-------------
Created : 5/7/19
"""
if tbl_name not in ("CurrencyPairExchangeRate", "AltCurrencyPairExchangeRate"):
query = ("SELECT "
"tpd.timePeriodStartTs "
"FROM "
f"{tbl_name} tbl "
"JOIN TimePeriodDimension tpd "
"ON tbl.timePeriodDimensionId = tpd.timePeriodDimensionId "
"ORDER BY tpd.timePeriodStartTs DESC "
"LIMIT 1;"
)
else:
query = ("SELECT "
"currencyPairExchangeRateTs "
"FROM "
f"{tbl_name} "
"ORDER BY currencyPairExchangeRateTs DESC "
"LIMIT 1; "
)
return self.select(query)
def get_last_pk(self, tbl_name, prim_key):
"""
DESCRIPTION
-----------
Obtain the greatest primary key value available for a database table.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the table in the database.
prim_key : str
A string value representing the field name of the primary key in the table.
RETURNS
-------
A pandas dataframe instance containing the value of the greatest primary
key value available in a database table.
MODIFICATIONS
-------------
Created : 5/7/19
"""
query = ("SELECT "
f"{prim_key} "
"FROM "
f"{tbl_name} "
f"ORDER BY {prim_key} DESC "
"LIMIT 1;"
)
return self.select(query)
def create_current_update_df(self):
"""
DESCRIPTION
-----------
A wrapper function intended to obtain the current primary key value or
last timestamp available for all tables in the list of tables to sync.
NOTES
-----
Assigns the current update dataframe instance as an attribute of the class.
RAISES
------
ValueError :
Raise a value error if the update method selected is neither of the
supported methods (Primary Key, Last Updated Timestamp)
MODIFICATIONS
-------------
Created : 5/7/19
"""
if self.update_method == "PK":
self.create_current_pk_df()
elif self.update_method == "TS":
self.create_current_ts_df()
else:
raise ValueError(f"{self.update_method} is not a supported update method.")
def get_comp_field_name(self):
"""
DESCRIPTION
-----------
Return the value of the column name in the pandas dataframe based on the
update method parameter.
MODIFICATIONS
-------------
Created : 5/8/19
"""
if self.update_method == "PK":
self.comp_field_name = "prim_key"
elif self.update_method == "TS":
self.comp_field_name = "last_ts"
else:
raise ValueError(f"{self.update_method} is not a supported update method.")
def get_db_field_name(self, tbl_name):
"""
DESCRIPTION
-----------
Return the value of the field name representing the primary key or timestamp
column used to measure updates
PARAMETERS
----------
tbl_name : str
A string value representing the name of the table in the database.
NOTES
-----
Assigns `db_field_name` as an attribute to the class.
MODIFICATIONS
-------------
Created : 5/8/19
"""
if self.update_method == "PK":
return self.sync_tbls_dict[tbl_name]
elif self.update_method == "TS":
if tbl_name not in ("CurrencyPairExchangeRate", "AltCurrencyPairExchangeRate"):
return "timePeriodStartTs"
else:
return "currencyPairExchangeRateTs"
def create_current_pk_df(self):
"""
DESCRIPTION
-----------
Query each database table in the sync tables dictionary and obtain the
last available primary key value. Create a dataframe which stores this
data with the table name as the index of the frame.
NOTES
-----
Assigns curr_update_df as an attribute of the class.
MODIFICATIONS
-------------
Created : 5/7/19
"""
self.curr_update_df = pd.DataFrame()
for tbl, prim_key in self.sync_tbls_dict.items():
self.update_current_pk_df(tbl, prim_key)
def update_current_pk_df(self, tbl_name, prim_key):
"""
DESCRIPTION
-----------
Retrieve and replace the current update value for a table given its
primary key and table name.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the table in the database.
prim_key : str
A string value representing the name of the primary key field in the
given table.
NOTES
-----
Modifies the curr_update_df in place.
MODIFICATIONS
-------------
Created : 5/8/19
"""
last_pk = self.get_last_pk(tbl_name, prim_key)
self.curr_update_df.loc[tbl_name, self.comp_field_name] = last_pk.values
def create_current_ts_df(self):
"""
DESCRIPTION
-----------
Query each database table in the sync tables dictionary and obtain the
last available timestamp value. Create a dataframe which stores this
data with the table name as the index of the frame.
NOTES
-----
Assigns curr_update_df as an attribute of the class.
MODIFICATIONS
-------------
Created : 5/7/19
"""
self.curr_update_df = pd.DataFrame()
for tbl in self.sync_tbls_dict:
self.update_current_ts_df(tbl)
def update_current_ts_df(self, tbl_name):
"""
DESCRIPTION
-----------
Retrieve and replace the current update timestamp value for a table given
the name of the table.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the table in the database.
NOTES
-----
Modifies the curr_update_df in place.
MODIFICATIONS
-------------
Created : 5/8/19
"""
last_ts = self.get_last_update_ts(tbl_name)
self.curr_update_df.loc[tbl_name, self.comp_field_name] = last_ts.values
def eval_if_tbls_have_been_modified(self):
"""
DESCRIPTION
-----------
Evaluate using a boolean flag whether any database table has been modified
since the last save of the sync module.
RETURNS
-------
Boolean value representing whether any table has been modified since the
last save of the sync module.
NOTES
-----
This is useful for checking a database against itself; it is not meant to
evaluate whether the master and slave databases are synced.
MODIFICATIONS
-------------
Created : 5/6/19
"""
return not self.curr_update_df.equals(self.last_update_df)
def create_pk_csv_locations(self):
"""
DESCRIPTION
-----------
Build the filepath for the existing primary key .csv file as well as the
backup filepath.
NOTES
-----
Assigns the filepath strings to the DB class instance.
MODIFICATIONS
-------------
Created : 5/6/19
"""
self.pk_csv_fp = f"{os.getcwd()}/src/stream/sync/pk_saves/{self.serv_name}.csv"
self.pk_csv_backup_fp = f"{os.getcwd()}/src/stream/sync/backup_pk_saves/{self.serv_name}.csv"
def create_ts_csv_locations(self):
"""
DESCRIPTION
-----------
Build the filepath for the existing last timestamp update .csv file as well
as the backup filepath.
NOTES
-----
Assigns the filepath strings to the DB class instance.
MODIFICATIONS
-------------
Created : 5/6/19
"""
self.ts_csv_fp = f"{os.getcwd()}/src/stream/sync/ts_saves/{self.serv_name}.csv"
self.ts_csv_backup_fp = f"{os.getcwd()}/src/stream/sync/backup_ts_saves/{self.serv_name}.csv"
def initial_sync_actions(self):
"""
DESCRIPTION
-----------
A set of actions taken before any comparison between the master and slave
databases is made. If any table has been modified since the last saved sync
location, the database is not the master, and the overwrite parameter is
True, prepare the table for sync by deleting rows occurring after the last
saved sync location, or by truncating the table completely if the last save
location does not exist (first-time sync).
MODIFICATIONS
-------------
Created : 5/7/19
"""
if self.are_tbls_modified and (not self.master_db) and self.overwrite_slave: # DO NOT MODIFY MASTER DB
if self.last_update_df is None:
for tbl in self.sync_tbls_dict:
self.truncate_table(tbl)
else:
for tbl in self.curr_update_df.index:
if tbl not in self.last_update_df.index:
self.truncate_table(tbl)
else:
if self.update_method == "PK":
self.delete_by_last_pk_id(
tbl,
self.sync_tbls_dict[tbl],
self.last_update_df.loc[tbl, self.comp_field_name]
)
elif self.update_method == "TS":
self.delete_by_timeperiod_start_ts(
tbl,
self.last_update_df.loc[tbl, self.comp_field_name]
)
def save_updates_to_csv(self, df, fp, backup_fp):
"""
DESCRIPTION
-----------
Save the current updates containing primary key or latest timestamp to
file after a database sync from master to slave is completed.
PARAMETERS
----------
df : pd.DataFrame
A pandas dataframe instance containing primary key or latest timestamp
values with the database table name as the table's index.
fp : str
A string value representing the filepath for which to save the .csv file.
backup_fp : str
A string value representing the backup filepath for which to save the
.csv file in case the first filepath is lost or corrupted.
NOTES
-----
The "w" mode on to_csv overwrites any file currently in place.
MODIFICATIONS
-------------
Created : 5/7/19
"""
df.to_csv(fp, mode="w")
df.to_csv(backup_fp, mode="w")
def read_from_last_pk_id(self, tbl_name, prim_key, last_value, limit=50000):
"""
DESCRIPTION
-----------
Query all rows of a table whose primary key value is greater than the
last saved value. Return as a pandas dataframe instance.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the database table
prim_key : str
A string value representing the field name of the primary key in the
database table.
last_value : int, float
An integer or float value (should be integer but float should be
supported) representing the last saved value of the primary key from
the last sync.
limit : int (default=50000)
An integer value representing the maximum number of rows to download
in a single query.
RETURNS
-------
A pandas dataframe instance containing rows whose primary key value is
greater than the last_value parameter.
MODIFICATIONS
-------------
Created : 5/7/19
"""
query = ("SELECT "
"* "
"FROM "
f"{tbl_name} "
"WHERE "
f"{prim_key} > {last_value} "
f"ORDER BY {prim_key} "
f"LIMIT {limit};"
)
return self.select(query)
def read_from_last_timeperiod_start_ts(self, tbl_name, last_value, limit=50000):
"""
DESCRIPTION
-----------
Query all rows of a table where the timePeriodStartTs is greater than the
last saved value. Return these rows as a pandas dataframe instance.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the table in the database.
last_value : int, float
An integer or float value (should be integer but float should be
supported) representing the last saved value of the timePeriodStartTs from
the last sync.
limit : int (default=50000)
An integer value representing the maximum number of rows to download
in a single query.
RETURNS
-------
A pandas dataframe instance containing rows whose timePeriodStartTs value
is greater than the last saved parameter.
MODIFICATIONS
-------------
Created : 5/7/19
"""
if tbl_name not in ("CurrencyPairExchangeRate", "AltCurrencyPairExchangeRate"):
query = ("SELECT "
"* "
"FROM "
f"{tbl_name} tbl "
"JOIN "
"TimePeriodDimension tpd "
"ON "
"tbl.timePeriodDimensionId = tpd.timePeriodDimensionId "
"WHERE "
f"tpd.timePeriodStartTs > '{last_value}' "
"ORDER BY tpd.timePeriodStartTs "
f"LIMIT {limit};"
)
else:
query = ("SELECT "
"* "
"FROM "
f"{tbl_name} "
"WHERE "
f"currencyPairExchangeRateTs > '{last_value}' "
"ORDER BY currencyPairExchangeRateTs "
f"LIMIT {limit};"
)
return self.select(query)
def read_from_table(self, tbl_name, last_value, limit=50000):
"""
DESCRIPTION
-----------
A wrapper function used to return a pandas dataframe containing all rows
of a database table where the primary key or timestamp value is greater
than the last_value parameter.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the database table.
last_value : int, float
An integer or float value (should be integer but float should be
supported) representing the last saved value of the primary key from
the last sync.
limit : int (default=50000)
An integer value representing the maximum number of rows to download
in a single query.
RETURNS
-------
A pandas dataframe instance containing all rows of a database table where
the primary key or timestamp is greater than the last value provided.
MODIFICATIONS
-------------
Created : 5/7/19
"""
if self.update_method == "PK":
prim_key = self.sync_tbls_dict[tbl_name]
return self.read_from_last_pk_id(tbl_name, prim_key, last_value, limit=limit)
elif self.update_method == "TS":
return self.read_from_last_timeperiod_start_ts(tbl_name, last_value, limit=limit)
else:
raise ValueError(f"{self.update_method} is not a supported update method.")
def truncate_table(self, tbl_name):
"""
DESCRIPTION
-----------
Truncate a database table.
PARAMETERS
----------
tbl_name : str
A string value representing the name of the database table.
MODIFICATIONS
-------------
Created : 5/7/19
"""
query = f"TRUNCATE TABLE {tbl_name};"
self.update(query)
self.sync_log.info(f"{self.serv_name} {tbl_name} table truncated.")
def delete_by_last_pk_id(self, tbl_name, pk_col, del_after_id):
"""
DESCRIPTION
-----------
Create and execute a DELETE SQL query to delete all rows after a provided
primary key id value from a provided database table.
PARAMETERS
----------
tbl_name : str
A string value representing a database table name.
pk_col : str
A string value representing the name of the primary key value in the table.
del_after_id : int, float
An integer or float value (should be integer but float should be
supported) representing the last saved value of the primary key from
the last sync, after which all new rows should be deleted.
MODIFICATIONS
-------------
Created : 5/7/19
"""
query = ("DELETE "
f"{tbl_name} "
"FROM "
f"{tbl_name} "
"WHERE "
f"{tbl_name}.{pk_col} > {del_after_id};"
)
self.update(query)
self.sync_log.info(f"{self.serv_name} {tbl_name} PK entries deleted.")
def delete_by_timeperiod_start_ts(self, tbl_name, del_after_ts):
"""
DESCRIPTION
-----------
Create and execute a DELETE SQL query to delete all rows after a provided
timestamp from a provided database table.
PARAMETERS
----------
tbl_name : str
A string value representing a database table name.
del_after_ts : dt.datetime
A datetime or timestamp object representing the timestamp to be used
as a condition to delete any future rows after.
MODIFICATIONS
-------------
Created : 5/7/19
"""
if tbl_name not in ("CurrencyPairExchangeRate", "AltCurrencyPairExchangeRate"):
query = ("DELETE "
f"{tbl_name} "
"FROM "
f"{tbl_name} "
"JOIN TimePeriodDimension "
f"ON TimePeriodDimension.timePeriodDimensionId = {tbl_name}.timePeriodDimensionId "
"WHERE "
f"TimePeriodDimension.timePeriodStartTs > '{del_after_ts}';"
)
else:
query = ("DELETE "
"FROM "
f"{tbl_name} "
"WHERE "
f"currencyPairExchangeRateTs > '{del_after_ts}';"
)
self.update(query)
self.sync_log.info(f"{self.serv_name} {tbl_name} TS entries deleted after {del_after_ts}")
def read_last_saved_csv(self, fp, backup_fp):
"""
DESCRIPTION
-----------
Attempt to read the last primary key values from a written .csv file stored
at the `pk_csv_fp` filepath. If the filepath does not exist, attempt to
read from the backup file. If this also does not exist, return a Null value.
PARAMETERS
----------
fp : str
A string value representing the expected location of the file
containing the last updates made from a database sync broken down
by table name.
backup_fp : str
A string value representing the expected location of the backup file
containing the last updates made from a database sync broken down by
table name in case original filepath is for some reason moved or
deleted.
RETURNS
-------
A pandas dataframe instance containing the last inserted primary key values
of a database sync or Null if the .csv file does not exist.
MODIFICATIONS
-------------
Created : 5/6/19
"""
try:
return pd.read_csv(fp, index_col=0)
except FileNotFoundError as orig_nfe:
try:
return pd.read_csv(backup_fp, index_col=0)
except FileNotFoundError:
# Neither the original nor the backup file exists yet (first sync): return Null as documented.
return None
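# A minimal illustrative usage sketch for the DB class above. The server names,
# the table selection, and the logger setup are assumptions made only for the
# example; this is not part of the original module.
if __name__ == "__main__":
    log = logging.getLogger("db_sync")
    tables = ["CurrencyPairExchangeRate"]
    master = DB("LIVE_1", tables, log, master_db=True, update_method="PK")
    slave = DB("DEV_1", tables, log, update_method="PK", overwrite_slave=True)
    master.update_db_table_positions()
    slave.update_db_table_positions()
    for tbl in tables:
        # Read everything past the slave's current position from the master,
        # then insert it into the slave in chunks.
        last_value = slave.curr_update_df.loc[tbl, slave.comp_field_name]
        new_rows = master.read_from_table(tbl, last_value)
        if len(new_rows) > 0:
            slave.write_df_in_chunks(new_rows, tbl)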
import lib
from lib.recommenders import *
import collections
import pandas as pd
# Methods to demo
METHODS = [lambda x, y: DebiasedModel(x, y), lambda x, y: ItemKNN(x, y, alpha=0.3, lmbda=0.0)]
PRINT_TOP_K = 10
def load_models(unlabeled_data, labeled_data):
models = collections.OrderedDict()
for model_constructor in METHODS:
model = model_constructor(unlabeled_data, labeled_data)
model.fit()
models[model.__class__.__name__] = model
return models
def print_candidates(models, movie, k=PRINT_TOP_K):
results = collections.OrderedDict()
for name, model in models.items():
results[name] = model.get_scored_candidates(movie, k=k).to_dict('records')
# format side-by-side
dct = collections.OrderedDict()
for method, v in results.items():
for col in ["title", "score"]:
dct[(method, col)] = pd.DataFrame(v)[col].values
print(pd.DataFrame(dct))
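# A minimal illustrative driver showing how load_models and print_candidates fit
# together. `load_unlabeled_data`, `load_labeled_data`, and the movie title are
# hypothetical placeholders, not part of the lib.recommenders API shown above.
if __name__ == "__main__":
    unlabeled_data = load_unlabeled_data()   # hypothetical data loader
    labeled_data = load_labeled_data()       # hypothetical data loader
    models = load_models(unlabeled_data, labeled_data)
    print_candidates(models, movie="Toy Story (1995)")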
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from ml_tooling import Model
from ml_tooling.data import Dataset, load_demo_dataset
from ml_tooling.transformers import DFStandardScaler
from ml_tooling.utils import DataType
class TestDemoDatasetModule:
@pytest.fixture
def load_dataset_iris(self) -> Dataset:
return load_demo_dataset("iris")
@pytest.fixture
def iris_df(self):
iris_data = load_iris()
return (
pd.DataFrame(data=iris_data.data, columns=iris_data.feature_names),
iris_data.target,
)
def test_repr_is_correct_load(self, load_dataset_iris: Dataset):
result = str(load_dataset_iris)
assert result == "<IrisData - Dataset>"
def test_dataset_return_correct_x_attribute(
self, load_dataset_iris: Dataset, iris_df: Tuple[pd.DataFrame, DataType]
):
x_expected, y_expected = iris_df
pd.testing.assert_frame_equal(load_dataset_iris.x, x_expected)
def test_dataset_return_correct_y_attribute(
self, load_dataset_iris: Dataset, iris_df: Tuple[pd.DataFrame, DataType]
):
x_expected, y_expected = iris_df
assert np.array_equal(load_dataset_iris.y, y_expected)
def test_dataset_from_fetchopenml_works(self):
dataset = load_demo_dataset("openml", name="miceprotein")
assert len(dataset.x) == 1080
def test_dataset_x_from_fetchopenml_with_parameters_works(self):
dataset = load_demo_dataset(
"openml", name="blood-transfusion-service-center", target_column="V1"
)
features_x = dataset.x
assert features_x.shape == (748, 4)
def test_dataset_y_from_fetchopenml_with_two_target_columns_works(self):
dataset = load_demo_dataset(
"openml",
name="blood-transfusion-service-center",
target_column=["V1", "V2"],
)
features_y = dataset.y
assert features_y.shape == (748, 2)
def test_load_prediction_data_works_as_expected(self):
dataset = load_demo_dataset("iris")
dataset.create_train_test(stratify=True)
feature_pipeline = Pipeline([("scale", DFStandardScaler())])
model = Model(LogisticRegression(), feature_pipeline=feature_pipeline)
model.train_estimator(dataset)
result = model.make_prediction(dataset, 5)
expected = pd.DataFrame({"Prediction": [0]})
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
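    def test_demo_dataset_iris_has_expected_shape(self):
        # A minimal extra check sketched here for illustration; it relies only on
        # the load_demo_dataset API already exercised above and on the well-known
        # shape of the sklearn iris data (150 samples, 4 features).
        dataset = load_demo_dataset("iris")
        assert dataset.x.shape == (150, 4)
        assert len(dataset.y) == 150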
import artm
import dill
import glob
import inspect
import json
import os
import pandas as pd
import pickle
import shutil
import warnings
from artm.wrapper.exceptions import ArtmException
from copy import deepcopy
from inspect import signature
from numbers import Number
from six import iteritems
from typing import (
Any,
Dict,
List,
)
from . import scores as tn_scores
from .base_model import BaseModel
from .base_score import BaseScore
from .frozen_score import FrozenScore
from ..cubes.controller_cube import ControllerAgent
from ..routine import transform_complex_entity_to_dict
# TODO: can't import Experiment from here (to specify type in init)
# probably need to rearrange imports
# (Experiment and Models are kind of in one bunch: one should be able to know about the other)
from .scores_wrapper import ScoresWrapper
LIBRARY_VERSION = artm.version()
ARTM_NINE = LIBRARY_VERSION.split(".")[1] == "9"
SUPPORTED_SCORES_WITHOUT_VALUE_PROPERTY = (
artm.score_tracker.TopTokensScoreTracker,
artm.score_tracker.ThetaSnippetScoreTracker,
artm.score_tracker.TopicKernelScoreTracker,
)
class TopicModel(BaseModel):
"""
Topic Model contains artm model and all necessary information: scores, training pipeline, etc.
"""
def __init__(
self,
artm_model: artm.ARTM = None,
model_id: str = None,
parent_model_id: str = None,
data_path: str = None,
description: List[Dict[str, Any]] = None,
experiment=None,
callbacks: List[ControllerAgent] = None,
custom_scores: Dict[str, BaseScore] = None,
custom_regularizers: Dict[str, artm.regularizers.BaseRegularizer] = None,
*args, **kwargs):
"""
Initialize stage, also used for loading previously saved experiments.
Parameters
----------
artm_model : artm model or None
model to use, None if you want to create model (Default value = None)
model_id : str
model id (Default value = None)
parent_model_id : str
model id from which current model was created (Default value = None)
data_path : str
path to the data (Default value = None)
description : list of dict
description of the model (Default value = None)
experiment : Experiment
the experiment to which the model is bound (Default value = None)
callbacks : list of objects with invoke() method
function called inside _fit which alters model parameters
mainly used for fancy regularizer coefficients manipulation
custom_scores : dict
dictionary with score names as keys and score classes as functions
(score class with functionality like those of BaseScore)
custom_regularizers : dict
dictionary with regularizer names as keys and regularizer classes as values
"""
super().__init__(model_id=model_id, parent_model_id=parent_model_id,
experiment=experiment, *args, **kwargs)
if callbacks is None:
callbacks = list()
if custom_scores is None:
custom_scores = dict()
if custom_regularizers is None:
custom_regularizers = dict()
self.callbacks = list(callbacks)
if artm_model is not None:
self._model = artm_model
else:
artm_ARTM_args = inspect.getfullargspec(artm.ARTM).args
kwargs = {k: v for k, v in kwargs.items() if k in artm_ARTM_args}
try:
self._model = artm.ARTM(**kwargs)
except ArtmException as e:
error_message = repr(e)
raise ValueError(
f'Cannot create artm model with parameters {kwargs}.\n'
"ARTM failed with following: " + error_message
)
self.data_path = data_path
self.custom_scores = custom_scores
self.custom_regularizers = custom_regularizers
self.library_version = LIBRARY_VERSION
self._description = []
if description is None and self._model._initialized:
init_params = self.get_jsonable_from_parameters()
self._description = [{"action": "init",
"params": [init_params]}]
else:
self._description = description
self._scores_wrapper = ScoresWrapper(
topicnet_scores=self.custom_scores,
artm_scores=self._model.scores
)
def __getattr__(self, attr_name):
return getattr(self._model, attr_name)
def _get_all_scores(self):
if len(self._model.score_tracker.items()) == 0:
yield from {
key: FrozenScore(list())
for key in self._model.scores.data.keys()
}.items()
yield from self._model.score_tracker.items()
if self.custom_scores is not None: # default is dict(), but maybe better to set None?
yield from self.custom_scores.items()
def _compute_score_values(self):
def get_score_properties_and_values(score_name, score_object):
for internal_name in dir(score_object):
if internal_name.startswith('_') or internal_name.startswith('last'):
continue
score_property_name = score_name + '.' + internal_name
yield score_property_name, getattr(score_object, internal_name)
score_values = dict()
for score_name, score_object in self._get_all_scores():
try:
score_values[score_name] = getattr(score_object, 'value')
except AttributeError:
if not isinstance(score_object, SUPPORTED_SCORES_WITHOUT_VALUE_PROPERTY):
warnings.warn(f'Score "{str(score_object.__class__)}" is not supported')
continue
for score_property_name, value in get_score_properties_and_values(
score_name, score_object):
score_values[score_property_name] = value
return score_values
def _fit(self, dataset_trainable, num_iterations, custom_regularizers=None):
"""
Parameters
----------
dataset_trainable : BatchVectorizer
Data for model fit
num_iterations : int
Amount of fit steps
custom_regularizers : dict of BaseRegularizer
Regularizers to apply to model
"""
if custom_regularizers is None:
custom_regularizers = dict()
all_custom_regularizers = deepcopy(custom_regularizers)
all_custom_regularizers.update(self.custom_regularizers)
if len(all_custom_regularizers) != 0:
for regularizer in all_custom_regularizers.values():
regularizer.attach(self._model)
base_regularizers_name = [regularizer.name
for regularizer in self._model.regularizers.data.values()]
base_regularizers_tau = [regularizer.tau
for regularizer in self._model.regularizers.data.values()]
for cur_iter in range(num_iterations):
self._model.fit_offline(batch_vectorizer=dataset_trainable,
num_collection_passes=1)
if len(all_custom_regularizers) != 0:
self._apply_custom_regularizers(
dataset_trainable, all_custom_regularizers,
base_regularizers_name, base_regularizers_tau
)
for name, custom_score in self.custom_scores.items():
try:
score = custom_score.call(self)
custom_score.update(score)
self._model.score_tracker[name] = custom_score
except AttributeError: # TODO: means no "call" attribute?
raise AttributeError(f'Score {name} doesn\'t have a desired attribute')
# TODO: think about performance issues
for callback_agent in self.callbacks:
callback_agent.invoke(self, cur_iter)
self._scores_wrapper._reset_score_caches()
def _apply_custom_regularizers(self, dataset_trainable, custom_regularizers,
base_regularizers_name, base_regularizers_tau):
"""
Parameters
----------
dataset_trainable : BatchVectorizer
Data for model fit
custom_regularizers : dict of BaseRegularizer
Regularizers to apply to model
base_regularizers_name : list of str
List with all artm.regularizers names, applied to model
base_regularizers_tau : list of float
List with tau for all artm.regularizers, applied to model
"""
pwt = self._model.get_phi(model_name=self._model.model_pwt)
nwt = self._model.get_phi(model_name=self._model.model_nwt)
rwt_name = 'rwt'
self._model.master.regularize_model(pwt=self._model.model_pwt,
nwt=self._model.model_nwt,
rwt=rwt_name,
regularizer_name=base_regularizers_name,
regularizer_tau=base_regularizers_tau)
(meta, nd_array) = self._model.master.attach_model(rwt_name)
attached_rwt = pd.DataFrame(data=nd_array, columns=meta.topic_name, index=meta.token)
for regularizer in custom_regularizers.values():
attached_rwt.values[:, :] += regularizer.grad(pwt, nwt)
self._model.master.normalize_model(pwt=self._model.model_pwt,
nwt=self._model.model_nwt,
rwt=rwt_name)
def get_jsonable_from_parameters(self):
"""
Gets artm model params.
Returns
-------
dict
artm model parameters
"""
parameters = transform_complex_entity_to_dict(self._model)
regularizers = {}
for name, regularizer in iteritems(self._model._regularizers.data):
tau = None
gamma = None
try:
tau = regularizer.tau
gamma = regularizer.gamma
except KeyError:
pass
regularizers[name] = [str(regularizer.config), tau, gamma]
for name, regularizer in iteritems(self.custom_regularizers):
tau = getattr(regularizer, 'tau', None)
gamma = getattr(regularizer, 'gamma', None)
config = str(getattr(regularizer, 'config', ''))
regularizers[name] = [config, tau, gamma]
parameters['regularizers'] = regularizers
parameters['version'] = self.library_version
return parameters
def get_init_parameters(self, not_include=None):
if not_include is None:
not_include = list()
init_artm_parameter_names = [
p.name for p in list(signature(artm.ARTM.__init__).parameters.values())
][1:]
parameters = transform_complex_entity_to_dict(self._model)
filtered = dict()
for parameter_name, parameter_value in parameters.items():
if parameter_name not in not_include and parameter_name in init_artm_parameter_names:
filtered[parameter_name] = parameter_value
return filtered
def save_custom_regularizers(self, model_save_path=None):
if model_save_path is None:
model_save_path = self.model_default_save_path
for regularizer_name, regularizer_object in self.custom_regularizers.items():
try:
save_path = os.path.join(model_save_path, regularizer_name + '.rd')
with open(save_path, 'wb') as reg_f:
dill.dump(regularizer_object, reg_f)
except (TypeError, AttributeError):
try:
save_path = os.path.join(model_save_path, regularizer_name + '.rp')
with open(save_path, 'wb') as reg_f:
pickle.dump(regularizer_object, reg_f)
except (TypeError, AttributeError):
warnings.warn(f'Cannot save {regularizer_name} regularizer.')
def save(self,
model_save_path=None,
phi=True,
theta=False,
dataset=None,):
"""
Saves model description and dumps artm model.
Use this method if you want to dump the model.
Parameters
----------
model_save_path : str
path to the folder with dumped info about model
phi : bool
save phi in csv format if True
theta : bool
save theta in csv format if True
dataset : Dataset
dataset
"""
if model_save_path is None:
model_save_path = self.model_default_save_path
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
if phi:
self._model.get_phi().to_csv(os.path.join(model_save_path, 'phi.csv'))
if theta:
self.get_theta(dataset=dataset).to_csv(os.path.join(model_save_path, 'theta.csv'))
model_itself_save_path = os.path.join(model_save_path, 'model')
if os.path.exists(model_itself_save_path):
shutil.rmtree(model_itself_save_path)
self._model.dump_artm_model(model_itself_save_path)
self.save_parameters(model_save_path)
for score_name, score_object in self.custom_scores.items():
class_name = score_object.__class__.__name__
save_path = os.path.join(
model_save_path,
'.'.join([score_name, class_name, 'p'])
)
try:
score_object.save(save_path)
except pickle.PicklingError:
warnings.warn(
f'Failed to save custom score "{score_object}" correctly! '
f'Freezing score (saving only its value)'
)
frozen_score_object = FrozenScore(
score_object.value,
original_score=score_object
)
frozen_score_object.save(save_path)
self.save_custom_regularizers(model_save_path)
for i, agent in enumerate(self.callbacks):
save_path = os.path.join(model_save_path, f"callback_{i}.pkl")
with open(save_path, 'wb') as agent_file:
dill.dump(agent, agent_file)
@staticmethod
def load(path, experiment=None):
"""
Loads the model.
Parameters
----------
path : str
path to the model's folder
experiment : Experiment
Returns
-------
TopicModel
"""
if "model" in os.listdir(f"{path}"):
model = artm.load_artm_model(f"{path}/model")
else:
model = None
print("There is no dumped model. You should train it again.")
with open(os.path.join(path, 'params.json'), 'r', encoding='utf-8') as params_file:
params = json.load(params_file)
topic_model = TopicModel(model, **params)
topic_model.experiment = experiment
for score_path in glob.glob(os.path.join(path, '*.p')):
# TODO: file '..p' is not included, so score with name '.' will be lost
# Need to validate score name?
score_file_name = os.path.basename(score_path)
*score_name, score_cls_name, _ = score_file_name.split('.')
score_name = '.'.join(score_name)
score_cls = getattr(tn_scores, score_cls_name)
loaded_score = score_cls.load(score_path)
# TODO check what happens with score name
loaded_score._name = score_name
topic_model.scores.add(loaded_score)
for reg_file_extension, loader in zip(['.rd', '.rp'], [dill, pickle]):
for regularizer_path in glob.glob(os.path.join(path, f'*{reg_file_extension}')):
regularizer_file_name = os.path.basename(regularizer_path)
regularizer_name = os.path.splitext(regularizer_file_name)[0]
with open(regularizer_path, 'rb') as reg_file:
topic_model.custom_regularizers[regularizer_name] = loader.load(reg_file)
all_agents = glob.glob(os.path.join(path, 'callback*.pkl'))
topic_model.callbacks = [None for _ in enumerate(all_agents)]
for agent_path in all_agents:
file_name = os.path.basename(agent_path).split('.')[0]
original_index = int(file_name.partition("_")[2])
with open(agent_path, 'rb') as agent_file:
topic_model.callbacks[original_index] = dill.load(agent_file)
topic_model._scores_wrapper._reset_score_caches()
_ = topic_model.scores
return topic_model
def clone(self, model_id=None):
"""
Creates a copy of the model except model_id.
Parameters
----------
model_id : str
(Default value = None)
Returns
-------
TopicModel
"""
topic_model = TopicModel(artm_model=self._model.clone(),
model_id=model_id,
parent_model_id=self.parent_model_id,
description=deepcopy(self.description),
custom_scores=deepcopy(self.custom_scores),
custom_regularizers=deepcopy(self.custom_regularizers),
experiment=self.experiment)
topic_model._score_functions = deepcopy(topic_model.score_functions)
topic_model._scores = deepcopy(topic_model.scores)
topic_model.callbacks = deepcopy(self.callbacks)
return topic_model
def get_phi(self, topic_names=None, class_ids=None, model_name=None):
"""
Gets custom Phi matrix of model.
Parameters
----------
topic_names : list of str or str
list with topics or single topic to extract,
None value means all topics (Default value = None)
class_ids : list of str or str
list with class_ids or single class_id to extract,
None means all class ids (Default value = None)
model_name : str
self.model.model_pwt by default, self.model.model_nwt is also
reasonable to extract unnormalized counters
Returns
-------
pd.DataFrame
phi matrix
"""
if ARTM_NINE:
phi_parts_array = []
if isinstance(class_ids, str):
class_ids = [class_ids]
class_ids_iter = class_ids or self._model.class_ids
# TODO: this workaround seems to be a correct solution to this problem
if not class_ids_iter:
valid_model_name = self._model.model_pwt
info = self._model.master.get_phi_info(valid_model_name)
class_ids_iter = list(set(info.class_id))
for class_id in class_ids_iter:
phi_part = self._model.get_phi(topic_names, class_id, model_name)
phi_part.index.rename("token", inplace=True)
phi_part.reset_index(inplace=True)
phi_part["modality"] = class_id
phi_parts_array.append(phi_part)
phi = pd.concat(phi_parts_array).set_index(['modality', 'token'])
else:
phi = self._model.get_phi(topic_names, class_ids, model_name)
phi.index = pd.MultiIndex.from_tuples(phi.index, names=('modality', 'token'))
return phi
def get_phi_dense(self, topic_names=None, class_ids=None, model_name=None):
"""
Gets custom Phi matrix of model.
Parameters
----------
topic_names : list of str or str
list with topics or single topic to extract,
None value means all topics (Default value = None)
class_ids : list of str or str
list with class_ids or single class_id to extract,
None means all class ids (Default value = None)
model_name : str
self.model.model_pwt by default, self.model.model_nwt is also
reasonable to extract unnormalized counters
Returns
-------
3-tuple
dense phi matrix
"""
return self._model.get_phi_dense(topic_names, class_ids, model_name)
def get_phi_sparse(self, topic_names=None, class_ids=None, model_name=None, eps=None):
"""
Gets custom Phi matrix of model as sparse scipy matrix.
Parameters
----------
topic_names : list of str or str
list with topics or single topic to extract,
None value means all topics (Default value = None)
class_ids : list of str or str
list with class_ids or single class_id to extract,
None means all class ids (Default value = None)
model_name : str
self.model.model_pwt by default, self.model.model_nwt is also
reasonable to extract unnormalized counters
eps : float
threshold to consider values as zero (Default value = None)
Returns
-------
3-tuple
sparse phi matrix
"""
return self._model.get_phi_sparse(topic_names, class_ids, model_name, eps)
def get_theta(self, topic_names=None,
dataset=None,
theta_matrix_type='dense_theta',
predict_class_id=None,
sparse=False,
eps=None,):
"""
Gets Theta matrix as pandas DataFrame
or sparse scipy matrix.
Parameters
----------
topic_names : list of str or str
list with topics or single topic to extract,
None value means all topics (Default value = None)
dataset : Dataset
an instance of Dataset class (Default value = None)
theta_matrix_type : str
type of matrix to be returned, possible values:
‘dense_theta’, ‘dense_ptdw’, ‘cache’, None (Default value = ’dense_theta’)
predict_class_id : str
class_id of a target modality to predict. When this option
is enabled the resulting columns of theta matrix will
correspond to unique labels of a target modality. The values
will represent p(c|d), which give the probability of class
label c for document d (Default value = None)
sparse : bool
if method returns sparse representation of the data (Default value = False)
eps : float
threshold to consider values as zero. Required for sparse matrix.
depends on the collection (Default value = None)
Returns
-------
pd.DataFrame
theta matrix
"""
# assuming the particular case of the BigARTM library where the user can't get the theta matrix
# without cache_theta == True. This also covers the theta_name == None case
if self._cache_theta:
# TODO wrap sparse in pd.SparseDataFrame and check that viewers work with that output
if sparse:
return self._model.get_theta_sparse(topic_names, eps)
else:
return self._model.get_theta(topic_names)
else:
if dataset is None:
raise ValueError("To get theta a dataset is required")
else:
batch_vectorizer = dataset.get_batch_vectorizer()
if sparse:
return self._model.transform_sparse(batch_vectorizer, eps)
else:
theta = self._model.transform(batch_vectorizer,
theta_matrix_type,
predict_class_id)
return theta
def to_dummy(self, save_path=None):
"""Creates dummy model
Parameters
----------
save_path : str (or None)
Path to folder with dumped info about topic model
Returns
-------
DummyTopicModel
Dummy model: without inner ARTM model,
but with scores and init parameters of calling TopicModel
Notes
-----
Dummy model has the same model_id as the original model,
but "model_id" key in experiment.models contains original model, not dummy
"""
from .dummy_topic_model import DummyTopicModel
# python crashes if place this import on top of the file
# import circle: TopicModel -> DummyTopicModel -> TopicModel
if save_path is None:
save_path = self.model_default_save_path
dummy = DummyTopicModel(
init_parameters=self.get_init_parameters(),
scores=dict(self.scores),
model_id=self.model_id,
parent_model_id=self.parent_model_id,
description=self.description,
experiment=self.experiment,
save_path=save_path,
)
# BaseModel spoils model_id trying to make it unique
dummy._model_id = self.model_id # accessing private field instead of public property
return dummy
def make_dummy(self, save_to_drive=True, save_path=None, dataset=None):
"""Makes topic model dummy in-place.
Parameters
----------
save_to_drive : bool
Whether to save model to drive or not. If not, the info will be lost
save_path : str (or None)
Path to folder to dump info to
dataset : Dataset
Dataset with text collection on which the model was trained.
Needed for saving Theta matrix
Notes
-----
After calling the method, the model is still of type TopicModel,
but there is no ARTM model inside! (so `model.get_phi()` won't work!)
If one wants to use the topic model as before,
this ARTM model should be restored first:
>>> save_path = topic_model.model_default_save_path
>>> topic_model._model = artm.load_artm_model(f'{save_path}/model')
"""
from .dummy_topic_model import DummyTopicModel
from .dummy_topic_model import WARNING_ALREADY_DUMMY
if hasattr(self, DummyTopicModel._dummy_attribute):
warnings.warn(WARNING_ALREADY_DUMMY)
return
if not save_to_drive:
save_path = None
else:
save_path = save_path or self.model_default_save_path
save_theta = self._model._cache_theta or (dataset is not None)
self.save(save_path, phi=True, theta=save_theta, dataset=dataset)
dummy = self.to_dummy(save_path=save_path)
dummy._original_model_save_folder_path = save_path
self._model.dispose()
self._model = dummy._model
del dummy
setattr(self, DummyTopicModel._dummy_attribute, True)
@property
def scores(self) -> Dict[str, List[float]]:
"""
Gets score values by name.
Returns
-------
dict : string -> list
dictionary with scores and corresponding values
"""
if self._scores_wrapper._score_caches is None:
self._scores_wrapper._score_caches = self._compute_score_values()
return self._scores_wrapper
@property
def description(self):
""" """
return self._description
@property
def regularizers(self):
"""
Gets regularizers from model.
"""
return self._model.regularizers
@property
def all_regularizers(self):
"""
Gets all regularizers with custom regularizers.
Returns
-------
regularizers_dict : dict
dict with artm.regularizer and BaseRegularizer instances
"""
regularizers_dict = dict()
for custom_regularizer_name, custom_regularizer in self.custom_regularizers.items():
regularizers_dict[custom_regularizer_name] = custom_regularizer
regularizers_dict.update(self._model.regularizers.data)
return regularizers_dict
def select_topics(self, substrings, invert=False):
"""
Gets all topics containing specified substring
Returns
-------
list
"""
return [
topic_name for topic_name in self.topic_names
if invert != any(
substring.lower() in topic_name.lower() for substring in substrings
)
]
@property
def background_topics(self):
return self.select_topics(["background", "bcg"])
@property
def specific_topics(self):
return self.select_topics(["background", "bcg"], invert=True)
@property
def class_ids(self):
""" """
return self._model.class_ids
def describe_scores(self, verbose=False):
data = []
for score_name, score in self.scores.items():
data.append([self.model_id, score_name, score[-1]])
result = pd.DataFrame(columns=["model_id", "score_name", "last_value"], data=data)
#!/usr/bin/env python
# coding: utf-8
# In[74]:
import pandas as pd
import numpy as np
from pathlib import Path
import os
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.decomposition import PCA
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline
from scipy import stats
from joblib import dump
from joblib import load
import xgboost as xgb
import matplotlib.pyplot as plt
from typing import Dict
from src.data import make_dataset
from kaggle.api.kaggle_api_extended import KaggleApi
from dotenv import find_dotenv, load_dotenv
# In[78]:
load_dotenv(find_dotenv())
api = KaggleApi()
api.authenticate()
# In[80]:
competition = os.environ['COMPETITION']
# # Set up directories
# In[65]:
project_dir = Path.cwd().parent
data_dir = project_dir / 'data'
raw_data_dir = data_dir / 'raw'
interim_data_dir = data_dir / 'interim'
processed_data_dir = data_dir / 'processed'
models_dir = project_dir / 'models'
# # Load data
# In[57]:
df_train = | pd.read_csv(raw_data_dir / 'train.csv') | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license
# agreements. See the NOTICE file distributed with this work for additional
# information regarding copyright ownership. The Modin Development Team
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# This file is copied and adapted from:
# http://github.com/modin-project/modin/master/modin/pandas/test/test_general.py
import sys
import pytest
import pandas
import numpy as np
from numpy.testing import assert_array_equal
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
modin_compatible_version = sys.version_info >= (3, 7, 0)
modin_installed = True
if modin_compatible_version:
try:
import modin # noqa: F401
except ModuleNotFoundError:
modin_installed = False
skip = not modin_compatible_version or not modin_installed
# These tests are written for versions of Modin that require python 3.7+
pytestmark = pytest.mark.skipif(skip, reason="Outdated or missing Modin dependency")
if not skip:
from ray.tests.modin.modin_test_utils import df_equals
import modin.pandas as pd
# Module scoped fixture. Will first run all tests without ray
# client, then rerun all tests with a single ray client session.
@pytest.fixture(params=[False, True], autouse=True, scope="module")
def run_ray_client(request):
if request.param:
with ray_start_client_server() as client:
yield client
else:
# Run without ray client (do nothing)
yield
# Cleanup state before rerunning tests with client
ray.shutdown()
random_state = np.random.RandomState(seed=42)
# Size of test dataframes
NCOLS, NROWS = (2 ** 6, 2 ** 8)
# Range for values for test data
RAND_LOW = 0
RAND_HIGH = 100
# Input data and functions for the tests
# The test data that we will test our code against
test_data = {
"int_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(
RAND_LOW, RAND_HIGH, size=(NROWS)
)
for i in range(NCOLS)
},
"float_nan_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
x
if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)
else np.NaN
for j, x in enumerate(
random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))
)
]
for i in range(NCOLS)
},
}
test_data["int_data"]["index"] = test_data["int_data"].pop(
"col{}".format(int(NCOLS / 2))
)
for col in test_data["float_nan_data"]:
for row in range(NROWS // 2):
if row % 16 == 0:
test_data["float_nan_data"][col][row] = np.NaN
test_data_values = list(test_data.values())
test_data_keys = list(test_data.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notnull(pandas_df)
modin_result = pd.notnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
def test_merge():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = pd.merge(modin_df, modin_df2, how=how)
pandas_result = pandas.merge(pandas_df, pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
s = pd.Series(frame_data.get("col1"))
with pytest.raises(ValueError):
pd.merge(s, modin_df2)
with pytest.raises(TypeError):
pd.merge("Non-valid type", modin_df2)
def test_pivot():
test_df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
df = pd.pivot(test_df, index="foo", columns="bar", values="baz")
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot(test_df["bar"], index="foo", columns="bar", values="baz")
def test_pivot_table():
test_df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
df = pd.pivot_table(
test_df, values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot_table(
test_df["C"], values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
def test_unique():
modin_result = pd.unique([2, 1, 3, 3])
pandas_result = pandas.unique([2, 1, 3, 3])
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series([2] + [1] * 5))
pandas_result = pandas.unique(pandas.Series([2] + [1] * 5))
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])
)
pandas_result = pandas.unique(
pandas.Series([pandas.Timestamp("20160101"), pandas.Timestamp("20160101")])
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Series(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Series(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Index(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Index(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series(pd.Categorical(list("baabc"))))
pandas_result = pandas.unique(pandas.Series(pandas.Categorical(list("baabc"))))
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
def test_to_datetime():
# DataFrame input for to_datetime
modin_df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
pandas_df = pandas.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df_equals(pd.to_datetime(modin_df), pandas.to_datetime(pandas_df))
# Series input for to_datetime
modin_s = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
pandas_s = | pandas.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000) | pandas.Series |
import os
from operator import itemgetter
from os import listdir
import numpy as np
import pandas as pd
from keras.models import load_model
from general_helper import coeff_determination
from processor import sdf_to_csv
from rdkit import Chem
from sklearn.externals import joblib
import sklearn
print(sklearn.__version__)
suppl = Chem.SDMolSupplier(
    r'C:\PycharmProjects\ml-data-qsar\TEST\LC50\LC50_training.sdf')
molecules = [x for x in suppl if x is not None]
molecules = molecules
fptype = [{'Type': 'DESC'},
{'Type': 'MACCS'},
{'Type': 'FCFC','Size': 512,'Radius':3},
{'Type': 'AVALON','Size': 512}]
dataframe = sdf_to_csv('LC50_prediction', fptype=fptype, molecules=molecules)
folder_path = r'C:\PycharmProjects\ml-models\UBC\Half_LIfe_U_2018_03_18__14_24_16_DESC_MACCS_FCFC_512_3_AVALON_512_scaled___'
models_paths = [os.path.join(folder_path, x) for x in listdir(folder_path) if x.split('.')[-1] == 'h5']
transformers = [os.path.join(folder_path, x) for x in listdir(folder_path) if x.split('.')[-1] == 'sav']
predicted_test_y_vectors = []
df_predict_clf = | pd.DataFrame() | pandas.DataFrame |
"""
aoe2netwrapper.converters
-------------------------
This module implements a high-level class with static methods to convert result of AoENetAPI methods to
pandas DataFrames.
"""
from typing import List
from loguru import logger
from aoe2netwrapper.models import (
LastMatchResponse,
LeaderBoardResponse,
MatchLobby,
NumOnlineResponse,
RatingTimePoint,
StringsResponse,
)
try:
import pandas as pd
except ImportError as error:
logger.error(
"User tried to use the 'converters' submodule without havinig installed the 'pandas' library."
)
raise NotImplementedError(
"The 'aoe2netwrapper.converters' module exports results to 'pandas.DataFrame' objects and "
"needs the 'pandas' library installed to function."
) from error
class Convert:
"""
This is a convenience class providing methods to convert the outputs from the AoE2NetAPI query methods
into pandas DataFrame objects. Every method below is a staticmethod, so no object has to be instantiated.
"""
@staticmethod
def strings(strings_response: StringsResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().strings to a pandas DataFrame.
Args:
strings_response (StringsResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the StringsResponse, each column being the values for a 'string' used
by the API, and the index being the ID numbers. Since this is the result of a join for many
'strings' that do not have the same amount of values, the resulting dataframe will contain NaNs
wherever a given 'string' does not have a value for the given index ID.
"""
if not isinstance(strings_response, StringsResponse):
logger.error("Tried to use method with a parameter of type != StringsResponse")
raise TypeError("Provided parameter should be an instance of 'StringsResponse'")
logger.debug("Converting StringsResponse to DataFrame")
dframe = pd.DataFrame(strings_response).transpose()
dframe.columns = dframe.iloc[0]
dframe = dframe.drop(index=[0]).reset_index(drop=True)
dframe = dframe.drop(columns=["language"])
logger.trace("Exporting each string attribute to its own dataframe and joining")
result = pd.DataFrame()
for col in dframe.columns:
intermediate = pd.DataFrame()
intermediate[col] = dframe[col][0]
intermediate["id"] = intermediate[col].apply(lambda x: x.id)
intermediate[col] = intermediate[col].apply(lambda x: x.string)
result = result.join(intermediate.set_index("id"), how="outer")
return result
@staticmethod
def leaderboard(leaderboard_response: LeaderBoardResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().leaderboard to a pandas DataFrame.
Args:
leaderboard_response (LeaderBoardResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the LeaderBoardResponse, each row being an entry in the leaderboard.
Top level attributes such as 'start' or 'total' are broadcast to an entire array the size of
the dataframe, and timestamps are converted to datetime objects.
"""
if not isinstance(leaderboard_response, LeaderBoardResponse):
logger.error("Tried to use method with a parameter of type != LeaderBoardResponse")
raise TypeError("Provided parameter should be an instance of 'LeaderBoardResponse'")
logger.debug("Converting LeaderBoardResponse leaderboard to DataFrame")
dframe = pd.DataFrame(leaderboard_response.leaderboard)
dframe = _export_tuple_elements_to_column_values_format(dframe)
logger.trace("Inserting LeaderBoardResponse attributes as columns")
dframe["leaderboard_id"] = leaderboard_response.leaderboard_id
dframe["start"] = leaderboard_response.start
dframe["count"] = leaderboard_response.count
dframe["total"] = leaderboard_response.total
logger.trace("Converting datetimes")
dframe["last_match"] = pd.to_datetime(dframe["last_match"], unit="s")
dframe["last_match_time"] = pd.to_datetime(dframe["last_match_time"], unit="s")
return dframe
@staticmethod
def lobbies(lobbies_response: List[MatchLobby]) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().lobbies to a pandas DataFrame. The resulting
DataFrame will contain several rows for each lobby, namely as many as there are players in said
lobby. All global attributes of each lobby are broadcasted to arrays, making them duplicates.
To isolate a specific lobby, either call the AoE2NetAPI().match method with the lobby's UUID or
make use of the groupby functionality of pandas DataFrames.
Args:
lobbies_response (List[MatchLobby]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of MatchLobby elements..
"""
if not isinstance(lobbies_response, list): # move list to List[MatchLobby] when supporting > 3.9
logger.error("Tried to use method with a parameter of type != List[MatchLobby]")
raise TypeError("Provided parameter should be an instance of 'List[MatchLobby]'")
logger.debug("Converting Lobbies response to DataFrame")
unfolded_lobbies = [_unfold_match_lobby_to_dataframe(match_lobby) for match_lobby in lobbies_response]
return pd.concat(unfolded_lobbies).reset_index(drop=True)
@staticmethod
def last_match(last_match_response: LastMatchResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().last_match to a pandas DataFrame. There is not
much use to this as the DataFrame will only have one row, but the method is provided nonetheless in
case users want to concatenate several of these results in a DataFrame.
Args:
last_match_response (LastMatchResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of LastMatchResponse attributes. Beware: the 'players'
column is directly the content of the 'LastMatchResponse.last_match.players' attribute and as
such holds a list of LobbyMember objects.
"""
if not isinstance(last_match_response, LastMatchResponse):
logger.error("Tried to use method with a parameter of type != LastMatchResponse")
raise TypeError("Provided parameter should be an instance of 'LastMatchResponse'")
logger.debug("Converting LastMatchResponse last_match to DataFrame")
dframe = pd.DataFrame(last_match_response.last_match).transpose()
dframe.columns = dframe.iloc[0]
dframe = dframe.drop(0).reset_index()
logger.trace("Inserting LastMatchResponse attributes as columns")
dframe["profile_id"] = last_match_response.profile_id
dframe["steam_id"] = last_match_response.steam_id
dframe["name"] = last_match_response.name
dframe["country"] = last_match_response.country
return dframe
@staticmethod
def match_history(match_history_response: List[MatchLobby]) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().match_history to a pandas DataFrame. The resulting
DataFrame will contain several rows for each lobby, namely as many as there are players in said
lobby. All global attributes of each lobby are broadcasted to arrays, making them duplicates.
To isolate a specific lobby, either call the AoE2NetAPI().match method with the lobby's UUID or
make use of the groupby functionality of pandas DataFrames.
Args:
match_history_response (List[MatchLobby]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of MatchLobby elements.
"""
# move list to List[MatchLobby] when supporting > 3.9
if not isinstance(match_history_response, list):
logger.error("Tried to use method with a parameter of type != List[MatchLobby]")
raise TypeError("Provided parameter should be an instance of 'List[MatchLobby]'")
logger.debug("Converting Match History response to DataFrame")
unfolded_lobbies = [
_unfold_match_lobby_to_dataframe(match_lobby) for match_lobby in match_history_response
]
return pd.concat(unfolded_lobbies).reset_index(drop=True)
@staticmethod
def rating_history(rating_history_response: List[RatingTimePoint]) -> pd.DataFrame:
"""
        Convert the result given by a call to AoE2NetAPI().rating_history to a pandas DataFrame.
Args:
rating_history_response (List[RatingTimePoint]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of RatingTimePoint elements, each row being the information from
one RatingTimePoint in the list. Timestamps are converted to datetime objects.
"""
# move list to List[RatingTimePoint] when supporting > 3.9
if not isinstance(rating_history_response, list):
logger.error("Tried to use method with a parameter of type != List[RatingTimePoint]")
raise TypeError("Provided parameter should be an instance of 'List[RatingTimePoint]'")
logger.debug("Converting Rating History rsponse to DataFrame")
dframe = pd.DataFrame(rating_history_response)
dframe = _export_tuple_elements_to_column_values_format(dframe)
logger.trace("Converting timestamps to datetime objects")
dframe["time"] = pd.to_datetime(dframe["timestamp"], unit="s")
dframe = dframe.drop(columns=["timestamp"])
return dframe
@staticmethod
def matches(matches_response: List[MatchLobby]) -> pd.DataFrame:
"""
        Convert the result given by a call to AoE2NetAPI().matches to a pandas DataFrame. The resulting
DataFrame will contain several rows for each lobby, namely as many as there are players in said
lobby. All global attributes of each lobby are broadcasted to arrays, making them duplicates.
To isolate a specific lobby, either call the AoE2NetAPI().match method with the lobby's UUID or
make use of the groupby functionality of pandas DataFrames.
Args:
matches_response (List[MatchLobby]): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the list of MatchLobby elements.
"""
if not isinstance(matches_response, list): # move list to List[MatchLobby] when supporting > 3.9
logger.error("Tried to use method with a parameter of type != List[MatchLobby]")
raise TypeError("Provided parameter should be an instance of 'List[MatchLobby]'")
logger.debug("Converting Match History response to DataFrame")
unfolded_lobbies = [_unfold_match_lobby_to_dataframe(match_lobby) for match_lobby in matches_response]
return pd.concat(unfolded_lobbies).reset_index(drop=True)
@staticmethod
def match(match_response: MatchLobby) -> pd.DataFrame:
"""
Convert the content of a MatchLobby to a pandas DataFrame. The resulting DataFrame will have as many
rows as there are players in the lobby, and all global attributes will be broadcasted to columns of
the same length, making them duplicates.
Args:
match_response (MatchLobby): a MatchLobby object.
Returns:
A pandas DataFrame from the MatchLobby attributes, each row being global information from the
MatchLobby as well as one of the players in the lobby.
"""
return _unfold_match_lobby_to_dataframe(match_response)
@staticmethod
def num_online(num_online_response: NumOnlineResponse) -> pd.DataFrame:
"""
Convert the result given by a call to AoE2NetAPI().num_online to a pandas DataFrame.
Args:
num_online_response (NumOnlineResponse): the response directly returned by your AoE2NetAPI
client.
Returns:
A pandas DataFrame from the NumOnlineResponse, each row being an entry in the leaderboard.
Top level attributes such as 'app_id' are broadcast to an entire array the size of the
dataframe, and timestamps are converted to datetime objects.
"""
if not isinstance(num_online_response, NumOnlineResponse):
logger.error("Tried to use method with a parameter of type != NumOnlineResponse")
raise TypeError("Provided parameter should be an instance of 'NumOnlineResponse'")
logger.debug("Converting NumOnlineResponse to DataFrame")
dframe = pd.DataFrame(num_online_response.dict())
logger.trace("Exporting 'player_stats' attribute contents to columns")
dframe["time"] = dframe.player_stats.apply(lambda x: x["time"]).apply(pd.to_datetime)
dframe["steam"] = dframe.player_stats.apply(lambda x: x["num_players"]["steam"])
dframe["looking"] = dframe.player_stats.apply(lambda x: x["num_players"]["looking"])
dframe["in_game"] = dframe.player_stats.apply(lambda x: x["num_players"]["in_game"])
dframe["multiplayer"] = dframe.player_stats.apply(lambda x: x["num_players"]["multiplayer"])
dframe["multiplayer_1h"] = dframe.player_stats.apply(lambda x: x["num_players"]["multiplayer_1h"])
dframe["multiplayer_24h"] = dframe.player_stats.apply(lambda x: x["num_players"]["multiplayer_24h"])
logger.trace("Removing 'player_stats' column to avoid nested & duplicate data")
dframe = dframe.drop(columns=["player_stats"])
return dframe
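# Example usage sketch (assumes AoE2NetAPI is importable from the package top level;
# the query parameters shown are illustrative, not exact signatures):
#   from aoe2netwrapper import AoE2NetAPI
#   api = AoE2NetAPI()
#   leaderboard_df = Convert.leaderboard(api.leaderboard(leaderboard_id=3))
#   lobbies_df = Convert.lobbies(api.lobbies())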
# ----- Helpers ----- #
def _export_tuple_elements_to_column_values_format(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Take in a pandas DataFrame with simple int values as columns, and elements being a tuple of
(attribute_name, value) and cast it to have the attribute_name as column names, and the values as values.
The original columns will be dropped in the process.
Args:
dataframe (pd.DataFrame): your pandas DataFrame.
Returns:
The refactored pandas DataFrame.
"""
dframe = dataframe.copy(deep=True)
logger.trace("Exporting attributes to columns and removing duplicate data")
for _, col_index in enumerate(dframe.columns):
attribute = dframe[col_index][0][0]
dframe[attribute] = dframe[col_index].apply(lambda x: x[1])
dframe = dframe.drop(columns=[col_index])
return dframe
def _unfold_match_lobby_to_dataframe(match_lobby: MatchLobby) -> pd.DataFrame:
"""
Convert the content of a MatchLobby to a pandas DataFrame. The resulting DataFrame will have as many
rows as there are players in the lobby, and all global attributes will be broadcasted to columns of the
same length, making them duplicates.
Args:
match_lobby (MatchLobby): a MatchLobby object.
Returns:
A pandas DataFrame from the MatchLobby attributes, each row being global information from the
MatchLobby as well as one of the players in the lobby.
"""
if not isinstance(match_lobby, MatchLobby):
logger.error("Tried to use method with a parameter of type != MatchLobby")
raise TypeError("Provided parameter should be an instance of 'MatchLobby'")
logger.trace("Unfolding MatchLobby.players contents to DataFrame")
dframe = pd.DataFrame(match_lobby.players)
dframe = _export_tuple_elements_to_column_values_format(dframe)
dframe = dframe.rename(columns={"name": "player"})
logger.trace("Broadcasting global MatchLobby attributes")
attributes_df = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
# # CaBi ML fitting
# In this notebook, I extend the ML framework that I used on the UCI data to the CaBi data.
#
# This version includes all variables labeled "for ML" in the data dictionary as an illustrative example.
# ## 0. Data load, shaping, and split
# * Read in data from AWS
# * Aside - note multicollinearity
# * Encode time variable (day_of_year) as cyclical
# * Split into Xtrain, Xtest, ytrain, ytest based on date
# * Specify feature and target columns
# In[1]:
# Read in data from AWS
from util_functions import *
import numpy as np
import pandas as pd
set_env_path()
conn, cur = aws_connect()
'''
For this nb, I only pull the date variable day_of_year for later transformation into cyclical time variables.
Not 100% sure on whether or not this precludes using things like OneHotEncoded day_of_week, but I omit that here.
I also omit actual temperature variables in favor of apparent temperature.
Some other weather variables are omitted like moonphase and windbearing
'''
query = """
SELECT
EXTRACT(DOY FROM date) as day_of_year,
date,
daylight_hours,
apparenttemperaturehigh,
apparenttemperaturelow,
cloudcover,
dewpoint,
humidity,
precipaccumulation,
precipintensitymax,
precipprobability,
rain,
snow,
visibility,
windspeed,
us_holiday,
nats_single,
nats_double,
dc_bike_event,
dc_pop,
cabi_bikes_avail,
cabi_stations_alx,
cabi_stations_arl,
cabi_stations_ffx,
cabi_stations_mcn,
cabi_stations_mcs,
cabi_stations_wdc,
cabi_docks_alx,
cabi_docks_arl,
cabi_docks_ffx,
cabi_docks_mcn,
cabi_docks_mcs,
cabi_docks_wdc,
cabi_stations_tot,
cabi_docks_tot,
cabi_dur_empty_wdc,
cabi_dur_full_wdc,
cabi_dur_empty_arl,
cabi_dur_full_arl,
cabi_dur_full_alx,
cabi_dur_empty_alx,
cabi_dur_empty_mcs,
cabi_dur_full_mcs,
cabi_dur_full_mcn,
cabi_dur_empty_mcn,
cabi_dur_full_ffx,
cabi_dur_empty_ffx,
cabi_dur_empty_tot,
cabi_dur_full_tot,
cabi_active_members_day_key,
cabi_active_members_monthly,
cabi_active_members_annual,
cabi_trips_wdc_to_wdc,
cabi_trips_wdc_to_wdc_casual
from final_db"""
pd.options.display.max_rows = None
pd.options.display.max_columns = None
df = pd.read_sql(query, con=conn)
df.set_index(df.date, drop=True, inplace=True)
df.index = pd.to_datetime(df.index)
print("We have {} instances and {} features".format(*df.shape))
# In[2]:
df.describe(percentiles=[.5]).round(3).transpose()
# In[3]:
def print_highly_correlated(df, features, threshold=0.75):
"""Prints highly correlated feature pairs in df"""
corr_df = df[features].corr()
# Select pairs above threshold
correlated_features = np.where(np.abs(corr_df) > threshold)
# Avoid duplication
correlated_features = [(corr_df.iloc[x,y], x, y) for x, y in zip(*correlated_features) if x != y and x < y]
# Sort by abs(correlation)
s_corr_list = sorted(correlated_features, key=lambda x: -abs(x[0]))
print("There are {} feature pairs with pairwise correlation above {}".format(len(corr_df.columns), threshold))
for v, i, j in s_corr_list:
cols = df[features].columns
print("{} and {} = {:0.3f}".format(corr_df.index[i], corr_df.columns[j], v))
# In[4]:
# Note multicollinearity
print_highly_correlated(df, df.columns, threshold=0.75)
# In[5]:
# Encode day_of_year as cyclical
df['sin_day_of_year'] = np.sin(2*np.pi*df.day_of_year/365)
df['cos_day_of_year'] = np.cos(2*np.pi*df.day_of_year/365)
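# Why two columns: with raw day_of_year, day 365 and day 1 look maximally far apart
# even though they are adjacent dates; on the (sin, cos) circle day 365 maps to
# roughly (0.0, 1.0) and day 1 to (0.017, 1.0), so the model sees them as neighbours.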
# ### Notes about dates in our data
# Start date = earliest 1/1/2013, flexible
#
# Can use all of 2017 through September 8 as test set
#
# For cross-validation, randomly assigned
#
# Whatever % we use for train/test we should use for CV
#
# Put date into index and use loc to do train test split
#
#
# * Split into Xtrain, Xtest, ytrain, ytest based on date
# * Training dates = 2013-01-01 to 2016-12-31
# * Test dates = 2017-01-01 to 2017-09-08
# * New data (coincides with beginning of dockless pilot) = 2017-09-09 to present
# In[6]:
# Train test split
train = df.loc['2013-01-01':'2016-12-31']
test = df.loc['2017-01-01':'2017-09-08']
print(train.shape, test.shape)
tr = train.shape[0]
te = test.shape[0]
trpct = tr/(tr+te)
tepct = te/(tr+te)
print("{:0.3f} percent of the data is in the training set and {:0.3f} percent is in the test set".format(trpct, tepct))
# In[7]:
# Specify columns to keep and drop for X and y
drop_cols = ['date', 'day_of_year']
y_cols = ['cabi_trips_wdc_to_wdc', 'cabi_trips_wdc_to_wdc_casual']
feature_cols = [col for col in df.columns if (col not in y_cols) & (col not in drop_cols)]
# Train test split
Xtrain_raw = train[feature_cols]
ytrain = train[y_cols[0]]
Xtest_raw = test[feature_cols]
ytest = test[y_cols[0]]
print(Xtrain_raw.shape, ytrain.shape, Xtest_raw.shape, ytest.shape)
# ### 1. Preprocessing
#
# We want to use PolynomialFeatures and StandardScaler in a Pipeline, but we only want to scale continuous features.
#
# We can do this by using FeatureUnion.
#
# Here, I do the polynomial transformation first and then feed it through a pipeline because I wasn't able to get it all working in one pipeline.
#
# * Use PolynomialFeatures to create quadratic and interaction terms
# * Create pipeline for selectively scaling certain variables
# * Fit and transform using pipeline to get final Xtrain and Xtest
# In[8]:
# Imports and custom classes
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler, MinMaxScaler
from sklearn.base import BaseEstimator, TransformerMixin
class Columns(BaseEstimator, TransformerMixin):
''' This is a custom transformer for splitting the data into subsets for FeatureUnion.
'''
def __init__(self, names=None):
self.names = names
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X):
return X[self.names]
class CustomPoly(BaseEstimator, TransformerMixin):
''' This is a custom transformer for making sure PolynomialFeatures
outputs a labeled df instead of an array. It doesn't work as is, but
I'm keeping the code here if we need it later.
'''
def __init__(self):
self.pf = None
def fit(self, X, y=None):
self.pf = PolynomialFeatures(2, include_bias=False).fit(X)
return self
def transform(self, X):
Xpf = self.pf.transform(X)
colnames = self.pf.get_feature_names(X.columns)
Xpoly = pd.DataFrame(Xpf, columns=colnames)
return Xpoly
# In[9]:
# PolynomialFeatures
# Should ultimately be part of a Pipeline, but I had issues because my custom Columns class takes a df
# CustomPoly above is an attempt to output a df
pf = PolynomialFeatures(2, include_bias=False)
Xtrain_pf_array = pf.fit_transform(Xtrain_raw)
Xtest_pf_array = pf.transform(Xtest_raw)
# Get feature names
Xtrain_cols = pf.get_feature_names(Xtrain_raw.columns)
# Output two DataFrames with the new poly columns
Xtrain_pf = pd.DataFrame(Xtrain_pf_array, columns=Xtrain_cols)
Xtest_pf = | pd.DataFrame(Xtest_pf_array, columns=Xtrain_cols) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
from sklearn.metrics import r2_score
import warnings
from scipy.interpolate import interp1d
import numpy as np
__author__ = '<NAME>, <NAME>'
__copyright__ = '© Pandemic Central, 2021'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/solveforj/pandemic-central'
__version__ = '3.0.0'
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District Of Columbia': 'DC',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
def get_state_fips():
# Source: US census
# Link: www.census.gov/geographies/reference-files/2017/demo/popest/2017-fips.html
# File: 2017 State, County, Minor Civil Division, and Incorporated Place FIPS Codes
# Note: .xslx file header was removed and sheet was exported to csv
fips_data = pd.read_csv("data/geodata/all-geocodes-v2017.csv",encoding = "ISO-8859-1", dtype={'State Code (FIPS)': str, 'County Code (FIPS)': str})
# Map 040 level fips code to state name in dictionary
state_data = fips_data[fips_data['Summary Level'] == 40].copy(deep=True)
state_data['state_abbrev'] = state_data['Area Name (including legal/statistical area description)'].apply(lambda x : us_state_abbrev[x])
state_map = pd.Series(state_data['State Code (FIPS)'].values,index=state_data['state_abbrev']).to_dict()
state_map['AS'] = "60"
state_map['GU'] = "66"
state_map['MP'] = "69"
state_map['PR'] = "72"
state_map['VI'] = "78"
# Get all county fips codes
fips_data = fips_data[fips_data['Summary Level'] == 50]
fips_data.insert(0, 'FIPS', fips_data['State Code (FIPS)'] + fips_data['County Code (FIPS)'])
fips_data = fips_data[['FIPS', 'State Code (FIPS)']]
return state_map, fips_data
def align_rt(county_rt):
print(" • Loading input Rt, testing, and cases datasets")
#county_rt = pd.read_csv("data/Rt/rt_data.csv", dtype={"FIPS":str})
#county_rt = county_rt[~county_rt['RtIndicator'].isnull()]
#county_rt['state_rt'] = county_rt['state_rt'].fillna(method='ffill')
#print(county_rt)
#print(len(county_rt[county_rt['FIPS'] == "01001"]))
#print(county_rt.groupby("FIPS").tail(1)['date'].unique())
case_data = pd.read_csv("data/JHU/jhu_data.csv", dtype={"FIPS":str})
#print(case_data.groupby("FIPS").tail(1)['date'].unique())
final = pd.merge(left=county_rt, right=case_data, how="left", on=['FIPS', 'date'], copy=False)
#print(len(final[final['FIPS'] == "01001"]))
#print(final.groupby("FIPS").tail(1)['date'].unique())
testing_data = pd.read_csv("data/COVIDTracking/testing_data.csv.gz", dtype={"FIPS":str})
#print(testing_data.groupby("FIPS").tail(1)['date'].unique())
final = pd.merge(left=final, right=testing_data, how="left", on=['FIPS','date'], copy=False)
#print(len(final[final['FIPS'] == "01001"]))
#print(final.groupby("FIPS").tail(1)['date'].unique())
final[['confirmed_cases_norm','confirmed_cases']] = final[['confirmed_cases_norm','confirmed_cases']].mask(final[['confirmed_cases_norm','confirmed_cases']] < 0, 0)
final['normalized_cases_norm'] = (final['confirmed_cases_norm']/final['totalTestResultsIncrease_norm'])
final['normalized_cases'] = (final['confirmed_cases']/final['totalTestResultsIncrease'])
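    # normalized_cases / normalized_cases_norm are daily test positivity:
    # new confirmed cases divided by the increase in total test results.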
final = final.sort_values(["state", "FIPS"])
county_counts = final.groupby("state", as_index=False).apply(lambda x: len(x['FIPS'].unique()))
county_counts.columns = ["state", "unique_counties"]
county_counts = county_counts['unique_counties'].to_list()
state_counts = final.groupby("state", as_index=False).apply(lambda x: len(x['FIPS']))
state_counts.columns = ['state','total_counties']
state_counts = state_counts['total_counties'].to_list()
ccounts = []
for i in range(len(state_counts)):
lst = [county_counts[i]]*state_counts[i]
ccounts += lst
final['county_counts'] = ccounts
track_higher_corrs = {'FIPS':[], 'region':[], 'shift':[], 'correlation': []}
#print(final.columns)
#final = final[(final['FIPS'].str.startswith("04")) | (final['FIPS'].str.startswith("01"))]
#print(len(final[final['FIPS'] == "01001"]))
#print(final)
#print("Latest Date")
#print(final.sort_values('date').groupby("FIPS").tail(1)['date'].unique())
def get_optimal_lag(realtime, backlag, predict_shift):
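        # Shift `backlag` forward by 0..74 days, keep the shift that maximizes its
        # correlation with `realtime`, and return the shifted series (offset by
        # `predict_shift`) alongside that optimal lag and the peak correlation.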
corrs = []
for i in range(0,75):
corrs.append(realtime.corr(backlag.shift(periods=i)))
max_index = corrs.index(max(corrs))
col1 = backlag.shift(periods=max_index - predict_shift).reset_index(drop=True)
col2 = pd.Series([max_index] * len(col1))
col3 = pd.Series([max(corrs)] * len(col1))
result = pd.concat([col1, col2, col3], axis=1).reset_index(drop=True)
return result
def get_prediction(y, x, x_var, shift):
X = np.array(x).reshape(-1,x_var)
y = np.array(y).reshape(-1,1)
poly = PolynomialFeatures(1)
X = poly.fit_transform(X)
regr = LinearRegression().fit(X, y)
coefficients = regr.coef_
intercept = regr.intercept_
return coefficients, intercept, shift.values.flatten()
def shift_fraction(name, column, predict_shift):
new_col = column.shift(periods = -1 * predict_shift).replace(0,0.0001)
if predict_shift <= 14:
new_col = pd.concat([new_col.iloc[0:-22], new_col.iloc[-22:].interpolate(method='spline', order=1)], axis=0)
if predict_shift > 14:
new_col = pd.concat([new_col.iloc[0:-20], new_col.iloc[-20:].interpolate(method='spline', order=1)], axis=0)
return new_col
def make_prediction(fips, dictionary, x, x_var):
index = x.index
X = np.array(x).reshape(-1,x_var)
poly = PolynomialFeatures(1)
X = poly.fit_transform(X)
coefficients = dictionary[fips][0]
intercept = dictionary[fips][1]
predictions = np.dot(X, coefficients.reshape(-1, 1)) + intercept
output = pd.Series(predictions.flatten().tolist())
output.index = index.tolist()
return output
def dual_shifter(col1, shift1, col2, shift2):
if shift1 > shift2:
print([shift2]*len(col2))
return pd.concat([col1.shift(shift1-shift2).reset_index(drop=True), col2.reset_index(drop=True), pd.Series([shift2]*len(col2))], axis=1)
else:
print([shift1]*len(col2))
return pd.concat([col1.reset_index(drop=True), col2.shift(shift2-shift1).reset_index(drop=True), pd.Series([shift1]*len(col1))], axis=1)
def interpolator(col, col_name):
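        # Fill NaNs in the last (predict + 14) rows of `col` by fitting a straight
        # line to the non-missing tail values and extrapolating from that fit.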
if len(col.dropna()) == 0:
return col
num = -1*predict - 14
interpolate_portion = col.iloc[num:]
interpolate_portion = pd.concat([pd.Series(range(len(interpolate_portion))), interpolate_portion.reset_index(drop=True)], axis=1)
train_col = interpolate_portion.dropna()
f = np.poly1d(np.polyfit(train_col[0],train_col[col_name], 1))
interpolate_portion[col_name] = interpolate_portion[col_name].fillna(interpolate_portion[0].apply(f))
#interpolate_portion[col_name] = interpolate_portion[col_name].fillna(method='ffill')
col_out = pd.concat([col.iloc[0:num], interpolate_portion[col_name]], axis=0)
col_out.index = col.index
return col_out
def optimizer(name, col_a, shift_a, corr_a, col_b, shift_b, corr_b):
corr_a = max(corr_a.fillna(0).tolist())
corr_b = max(corr_b.fillna(0).tolist())
shift_a = max(shift_a.fillna(0).tolist())
shift_b = max(shift_b.fillna(0).tolist())
track_higher_corrs['FIPS'].append(name)
if corr_a >= corr_b:
new_col = pd.concat([col_a.reset_index(drop=True), pd.Series([shift_a]*col_a.shape[0])], axis=1)
new_col.columns = ['rt_final_unaligned', 'rt_final_shift']
track_higher_corrs['region'].append('county')
track_higher_corrs['shift'].append(new_col['rt_final_shift'].iloc[0])
track_higher_corrs['correlation'].append(corr_a)
return new_col
else:
new_col = pd.concat([col_b.reset_index(drop=True), pd.Series([shift_b]*col_b.shape[0])], axis=1)
new_col.columns = ['rt_final_unaligned', 'rt_final_shift']
track_higher_corrs['region'].append('state')
track_higher_corrs['shift'].append(new_col['rt_final_shift'].iloc[0])
track_higher_corrs['correlation'].append(corr_b)
return new_col
# Align the Rt.live Rt
print(" • Calculating optimal Rt shifts")
final_estimate = final[(final['date'] > "2020-03-30")]
final_estimate = final_estimate[~final_estimate['normalized_cases_norm'].isnull()]
final_estimate = final_estimate.reset_index(drop=True)
#print(len(final_estimate[final_estimate['FIPS'] == "01001"]))
# Align the Rt.live (state) Rt so that it is maximally correlated with test positivity by shifting it forward
new_col = final_estimate.groupby("FIPS", as_index=False).apply(lambda x : get_optimal_lag(x['normalized_cases'], x['state_rt'], 0)).reset_index(drop=True)
final_estimate[["aligned_state_rt","ECR_shift", "ECR_correlation"]] = new_col
# Use the aligned state Rt to calculate an estimated county Rt that is also aligned
#print(" Aligning COVID Act Now county-level Rt")
# Find rows with a calculated and estimated county Rt
with_county_rt = final_estimate[~final_estimate['RtIndicator'].isnull()].dropna().reset_index(drop=True)
# Find rows with a estimated county Rt only
without_county_rt = final_estimate[final_estimate['RtIndicator'].isnull()]
# Align the COVID act now Rt so that it is maximally correlated with cases by shifting it forward
new_col = with_county_rt.groupby("FIPS", as_index=False).apply(lambda x : get_optimal_lag(x['normalized_cases'], x['RtIndicator'], 0)).reset_index(drop=True)
with_county_rt[['CAN_county_rt','CAN_shift', "CAN_correlation"]] = new_col
# Drop NA values from each of the three dataframes
without_county_rt = without_county_rt.replace([np.inf, -np.inf], np.nan)
without_county_rt = without_county_rt[['FIPS', 'date', 'state_rt', 'normalized_cases_norm', 'confirmed_cases_norm', 'ECR_shift', 'ECR_correlation']].interpolate().dropna()
with_county_rt = with_county_rt.replace([np.inf, -np.inf], np.nan)
with_county_rt = with_county_rt[['FIPS', 'date', 'state_rt', 'aligned_state_rt', 'normalized_cases_norm','confirmed_cases_norm','ECR_shift', 'ECR_correlation', 'RtIndicator', 'CAN_county_rt', 'CAN_shift', 'CAN_correlation']].interpolate().dropna()
final_estimate = final_estimate.replace([np.inf, -np.inf], np.nan)
final_estimate = final_estimate[['FIPS', 'date', 'state_rt', 'aligned_state_rt', 'normalized_cases_norm', 'confirmed_cases_norm', 'ECR_shift', 'ECR_correlation']].interpolate().dropna()
with_county_rt_merge = with_county_rt[['FIPS', 'date', 'CAN_county_rt', 'RtIndicator', 'CAN_shift', 'CAN_correlation']]
merged = pd.merge(left=final_estimate, right=with_county_rt_merge, how='left', on=['FIPS', 'date'], copy=False)
#print(len(merged[merged['FIPS'] == "01001"]))
print(" • Computing case predictions from aligned Rt")
merged = merged.reset_index(drop=True)
new_col = merged.groupby("FIPS", as_index=False).apply(lambda x : optimizer(x.name, x['RtIndicator'], x['CAN_shift'], x['CAN_correlation'], x['state_rt'], x['ECR_shift'], x['ECR_correlation']))
merged[['rt_final_unaligned', 'rt_final_shift']] = new_col.reset_index(drop=True)
pd.DataFrame.from_dict(track_higher_corrs, orient='index').transpose().to_csv('data/Rt/higher_corrs.csv', index=False, sep=',')
new_col = merged.groupby("FIPS", as_index=False).apply(lambda x : (x["rt_final_unaligned"].shift(int(x['rt_final_shift'].iloc[0]))))
merged["rt_final_aligned"] = new_col.reset_index(drop=True)
#print(len(merged))
#print(merged[merged['FIPS'] == "01001"])
merged_training = merged[(~merged['rt_final_aligned'].isnull())]
#print(len(merged_training))
prediction_dict = merged_training.groupby("FIPS").apply(lambda x : get_prediction(x['normalized_cases_norm'], x['rt_final_aligned'], 1, x['rt_final_shift'].head(1))).to_dict()
merged_training = merged_training[~merged_training['rt_final_unaligned'].isnull()].reset_index(drop=True)
new_col = merged_training.groupby("FIPS", as_index=False).apply(lambda x : make_prediction(x.name, prediction_dict, x['rt_final_unaligned'], 1))
merged_training["prediction_unaligned"] = new_col.reset_index(drop=True)
#print("Merged Training")
#print(merged_training)
#print(len(merged_training[merged_training['FIPS'] == "01001"]))
shift_dates = [7, 14, 21, 28]
for predict in shift_dates:
print(" • Shifting case predictions and Rt for " + str(predict) + "-day forecasts")
dat = merged_training.copy(deep=True)
# Shift
new_col = dat.groupby("FIPS", as_index=False).apply(lambda x : (x["prediction_unaligned"].shift(int(x['rt_final_shift'].unique()[0] - predict))))
dat["prediction_aligned_" + str(predict)] = new_col.reset_index(drop=True)
new_col = dat.groupby("FIPS", as_index=False).apply(lambda x : (x["rt_final_unaligned"].shift(int(x['rt_final_shift'].unique()[0] - predict))))
dat["rt_aligned_" + str(predict)] = new_col.reset_index(drop=True)
# Interpolate
new_col = dat.groupby("FIPS", as_index=False).apply(lambda x : interpolator(x["prediction_aligned_" + str(predict)], "prediction_aligned_" + str(predict)))
dat["prediction_aligned_int_" + str(predict)] = new_col.reset_index(drop=True).clip(lower=0)
new_col = dat.groupby("FIPS", as_index=False).apply(lambda x : interpolator(x["rt_aligned_" + str(predict)], "rt_aligned_" + str(predict)))
dat["rt_aligned_int_" + str(predict)] = new_col.reset_index(drop=True).clip(lower=0)
# Correlate
#print()
#print(dat['normalized_cases_norm'].shift(-1*predict).corr(dat['prediction_aligned_int_'+str(predict)]))
#print(dat['normalized_cases_norm'].shift(-1*predict).corr(dat['rt_aligned_int_'+str(predict)]))
#print(dat.groupby("FIPS").tail(1)['date'].unique())
#print()
dat = dat[['FIPS', 'date','normalized_cases_norm', 'prediction_aligned_int_' + str(predict), 'rt_aligned_int_'+str(predict)]]
#print(len(dat[dat['FIPS'] == "01001"]))
dat.to_csv("data/Rt/aligned_rt_"+str(predict)+".csv", index=False, sep=',')
def warning_suppressor(debug_mode=True):
if not debug_mode:
warnings.filterwarnings("ignore")
def update_Rt(can_key):
warning_suppressor(debug_mode=False) # Change it to show errors
print("• Downloading Rt dataset")
state_map, fips_data = get_state_fips()
s_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
# Rt calculations from rt.live
rt_data = pd.read_csv("https://api.covidactnow.org/v2/states.timeseries.csv?apiKey=" + can_key, dtype={"fips": str}, \
usecols=['date', 'fips', 'metrics.infectionRate'])
rt_data = rt_data.rename({'fips':'state', 'metrics.infectionRate':'state_rt'}, axis=1)
date_list = rt_data['date'].unique()
fips_list = fips_data['FIPS'].unique()
df = | pd.DataFrame() | pandas.DataFrame |
import json
import pathlib
import numpy as np
import matplotlib.pyplot as plt
import re
import pandas as pd
import os.path
from os import path
import math
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname):
        self.data_fname = pathlib.Path(data_fname)
        if not path.exists(data_fname):
raise ValueError('there is no such file')
def read_data(self):
"""Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
with open(self.data_fname) as f:
data = json.loads(f.read())
self.data=pd.DataFrame(data)
return (self.data)
def show_age_distrib(self) :
"""Calculates and plots the age distribution of the participants.
Returns
-------
hist : np.ndarray
Number of people in a given bin
bins : np.ndarray
Bin edges
"""
data=QuestionnaireAnalysis.read_data(self)
df_json=pd.DataFrame(data)
bins=[0,10,20,30,40,50,60,70,80,90,100]
        hist, bins, _ = plt.hist(df_json.loc[df_json['age'] != "nan", 'age'], bins=bins)
plt.ylabel('Number of participants')
plt.xlabel('Age')
return((hist,bins))
def remove_rows_without_mail(self):
"""Checks self.data for rows with invalid emails, and removes them.
Returns
-------
df : pd.DataFrame
A corrected DataFrame, i.e. the same table but with the erroneous rows removed and
the (ordinal) index after a reset.
"""
data=QuestionnaireAnalysis.read_data(self)
df_json=pd.DataFrame(data)
mask=[]
for ind in range(len(df_json)):
if re.search(r'\w+@\w+.c',df_json.iloc[ind,3]):
mask.append(ind)
correct_email_df=df_json.iloc[mask]
correct_email_df.index=list(range(len(correct_email_df)))
return(correct_email_df)
def fill_na_with_mean(self):
"""Finds, in the original DataFrame, the subjects that didn't answer
all questions, and replaces that missing value with the mean of the
other grades for that student.
Returns
-------
df : pd.DataFrame
The corrected DataFrame after insertion of the mean grade
arr : np.ndarray
Row indices of the students that their new grades were generated
"""
data=QuestionnaireAnalysis.read_data(self)
df_json= | pd.DataFrame(data) | pandas.DataFrame |
# coding=utf-8
import requests
import time,datetime
import json
#import smtplib
#import hashlib
#import pymysql
#from datetime import datetime
import pandas as pd
N =5
keys = ['牛奶','床',]  # keywords to watch: '牛奶' = milk, '床' = bed
# Fetch real-time data
def get_real_time_data():
c_time = int(time.time())
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Host': 'www.smzdm.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
url = 'https://www.smzdm.com/homepage/json_more?timesort=' + str(c_time) + '&p=1'
r = requests.get(url=url, headers=headers)
# data = r.text.encode('utf-8').decode('unicode_escape')
data = r.text
dataa = json.loads(data)
dataa = dataa['data']
data = | pd.DataFrame(dataa) | pandas.DataFrame |
import pandas as pd
import numpy as np
from random import sample
from xgboost import XGBRegressor
from random import choices,seed
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats import t
import os
os.chdir("c://users/jliv/downloads/")
dat=pd.read_csv("auto-mpg.data",header=None)
"""
1. mpg: continuous
2. cylinders: multi-valued discrete
3. displacement: continuous
4. horsepower: continuous
5. weight: continuous
6. acceleration: continuous
7. model year: multi-valued discrete
8. origin: multi-valued discrete
9. car name: string (unique for each instance)
"""
pd.set_option("display.max_columns",19)
df=pd.DataFrame(dat[0].str.split(expand=True))
df=df[[0,1,2,3,4,5,6,7]].copy()
columns = ['mpg','cyl','disp','hp','weight','acc','yr','origin']
df.columns = columns
df.replace("?",np.nan,inplace=True)
for i in df.columns:
df[i]=df[i].astype(float)
for i in columns:
print(i,len(df[df[i].isna()]))
df.dropna(inplace=True)
seed(42)
train=sample(list(df.index),int(len(df.index)*.8))
train.sort()
test=[i for i in df.index if i not in train]
kpi='mpg'
feats=['cyl', 'disp', 'hp', 'weight', 'acc', 'yr', 'origin']
X=df[df.index.isin(train)][feats].copy()
Y=df[df.index.isin(train)][kpi]
xtest=df[df.index.isin(test)][feats].copy()
ytest=df[df.index.isin(test)][kpi]
means=np.mean(X)
stds=np.std(X)
X=(X-means)/stds
xtest=(xtest-means)/stds
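# Note: test features are scaled with the *training* means/stds to avoid leakage.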
corrdf=X.copy()
corrdf[kpi]=Y
corrdf.corr()[kpi]
corrdf.corr()
seed(42)
fold = pd.Series(choices(range(1,9),k=len(X)),index=X.index)
class mixed_model:
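    """
    Partially linear model: the features in `linear_feats` enter through a linear
    term while the remaining features are handled by an arbitrary sklearn-style
    regressor (an XGBRegressor in this script). Each epoch alternates between
    (1) refitting the nonlinear model on y minus the current linear part and
    (2) a gradient (or Newton) step on the linear coefficients against the
    remaining residual. Training stops once two-sample t-tests comparing the
    third and fourth quarters of each coefficient chain (and of the RMSE chain)
    indicate convergence, or when the epoch budget runs out.
    """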
def __init__(self,mod,lr,epoch,optimization):
self.lr=lr
self.epoch=epoch
self.mod=mod
self.optimization=optimization
def fit(self,x,y,linear_feats):
self.x=x
self.y=y
self.linear_feats=linear_feats
self.other_feats=[i for i in self.x.columns if i not in self.linear_feats]
#self.coefs_=np.random.normal(0,.5,len(self.linear_feats))
self.coefs_=np.zeros(len(self.linear_feats))
self.rmse_ = []
self.coefs_per_epoch=[]
for e in range(0,self.epoch):
self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coefs_)
resid = (self.y-self.x[self.linear_feats]@self.coefs_-self.mod.predict(self.x[self.other_feats]))
            grad = resid @ self.x[self.linear_feats]
self.rmse_.append(np.mean(resid**2)**.5)
if self.optimization =='Newtonian':
                H = np.linalg.pinv(self.x[self.linear_feats].T @ self.x[self.linear_feats])
term = grad@H
else:
term = grad
            self.coefs_ = self.coefs_ + self.lr*term
self.coefs_per_epoch.append(list(self.coefs_))
self.epochs_completed_=e
self.converged_ = []
#if e>=80:
if e >= self.epoch*.1:
"""
                Must run at least 10% of the epochs before convergence is checked.
Stopping Criteria:
T-test of sample means for parameter estimates and model loss with:
X1: Third quarter of parameter chain
X2: Fourth quarter of parameter chain
If all parameters and loss achieve convergence with 95% confidence:
Break
If the final Epoch is reached without some parameters or loss converging:
Deliver warning to increase epoch parameter
"""
for i in range(len(self.linear_feats)):
parameter_chain=np.array(self.coefs_per_epoch)[:,i]
X1= parameter_chain[int(e*.5):int(e*.75)]
X2= parameter_chain[int(e*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
if absT<=t.ppf(1-.05/2, v):
self.converged_.append(1)
else:
self.converged_.append(0)
parameter_chain=self.rmse_
X1= parameter_chain[int(e*.5):int(e*.75)]
X2= parameter_chain[int(e*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
if absT<=t.ppf(1-.05/2, v):
self.converged_.append(1)
else:
self.converged_.append(0)
"""
if absT<=t.ppf(1-.05/2, v):
if np.mean(self.converged_)!=1:
print("Warning: Some parameters may not have converged, perhaps increase epochs.")
break"""
#If all parameters converged, break; if last epoch is reached without convergence, produce warning
if np.mean(self.converged_)==1:
break
elif (np.mean(self.converged_)!=1)&(e==self.epoch-1):
print("Warning: Some parameters or Loss may not have converged, perhaps increase epochs.")
self.coef_means=pd.Series(np.mean(np.array(self.coefs_per_epoch)[int(self.epochs_completed_*.5):,],axis=0),index=self.linear_feats)
self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coef_means)
def predict(self,x):
#self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coef_means)
pred=x[self.linear_feats]@self.coef_means+self.mod.predict(x[self.other_feats])
return pred
def predict_last_coefs(self,x):
#self.mod.fit(self.x[self.other_feats],self.y-self.x[self.linear_feats]@self.coefs_)
pred=x[self.linear_feats]@self.coefs_+self.mod.predict(x[self.other_feats])
return pred
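# Added illustration (not part of the original script): the early-stopping rule documented
# inside mixed_model.fit is a two-sample t-test comparing the 3rd and 4th quarters of a
# parameter (or RMSE) chain. A minimal standalone sketch of that check, assuming `np` and
# scipy.stats' `t` are already imported as in the surrounding script:
def chain_converged(chain, alpha=.05):
    # split the second half of the chain into two equal windows
    e = len(chain)
    X1 = np.asarray(chain[int(e*.5):int(e*.75)])
    X2 = np.asarray(chain[int(e*.75):])
    v = len(X1)+len(X2)-2
    T = (np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
    return abs(T) <= t.ppf(1-alpha/2, v)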
X['int']=1
xtest['int']=1
rmse_PR=[]
rmse_REG=[]
rmse_XGB=[]
r2_PR=[]
r2_REG=[]
r2_XGB=[]
for f in range(1,9):
xt=X[fold!=f].copy()
yt=Y[fold!=f]
xv=X[fold==f].copy()
yv=Y[fold==f]
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=.1,
epoch=500,
optimization='Gradient'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'int']
)
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
rmse_PR.append(rmse)
r2_PR.append(r2)
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
##Regression
coef=np.linalg.pinv(xt.T@xt)@(xt.T@yt)
yfit_regression=xt@coef
ypred_regression=xv@coef
coef_df=pd.DataFrame(
{'feat':xt.columns,
'coef':coef}
)
r2_regression=1-sum((yv-ypred_regression)**2)/sum((yv-np.mean(yv))**2)
rmse_regression=np.mean((yv-ypred_regression)**2)**.5
rmse_REG.append(rmse_regression)
r2_REG.append(r2_regression)
##XGB
xgb = mod.mod.fit(xt,yt)
ypred_xgb=pd.Series(xgb.predict(xv),index=yv.index)
r2_xgb=1-sum((yv-ypred_xgb)**2)/sum((yv-np.mean(yv))**2)
rmse_xgb=np.mean((yv-ypred_xgb)**2)**.5
rmse_XGB.append(rmse_xgb)
r2_XGB.append(r2_xgb)
cv_out=pd.DataFrame({'fold':range(1,9)})
cv_out['rmse_PR']=rmse_PR
cv_out['rmse_REG']=rmse_REG
cv_out['rmse_XGB']=rmse_XGB
cv_out['r2_PR']=r2_PR
cv_out['r2_REG']=r2_REG
cv_out['r2_XGB']=r2_XGB
print(np.round(np.mean(cv_out,axis=0),3))
#TEST
kpi='mpg'
feats=['cyl', 'disp', 'hp', 'weight', 'acc', 'yr', 'origin']
xt=X.copy()
yt=Y.copy()
xv=xtest.copy()
yv=ytest.copy()
"""
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=10,
epoch=2800,
optimization='Newtonian'
)
"""
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=.1,
epoch=1500,
optimization='Gradient'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'int']
)
#ypred=mod.predict_last_coefs(xv)
mod.coefs_
mod.converged_
mod.coefs_per_epoch
mod.epochs_completed_
#pd.DataFrame(round(mod.coef_means,2),columns=['Coefficient']).to_csv("downloads/pr_coef.csv")
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
for i in range(len(mod.linear_feats)):
plt.plot(np.array(mod.coefs_per_epoch)[:,i])
plt.title("Coefficient of "+mod.linear_feats[i])
plt.xlabel("Epoch")
plt.show()
plt.plot(mod.rmse_)
plt.title("Training RMSE")
plt.xlabel("Epoch")
plt.show()
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
ax1.plot(np.array(mod.coefs_per_epoch)[:,0])
ax1.set_title(mod.linear_feats[0])
ax1.set_xticklabels([])
ax2.plot(np.array(mod.coefs_per_epoch)[:,1])
ax2.set_title(mod.linear_feats[1])
ax2.set_xticklabels([])
ax3.plot(np.array(mod.coefs_per_epoch)[:,2])
ax3.set_title(mod.linear_feats[2])
ax3.set_xlabel("Epoch")
ax4.plot(mod.rmse_)
ax4.set_title("RMSE")
ax4.set_xlabel("Epoch")
plt.show()
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
converged = int(mod.epochs_completed_*.5)
ax1.hist(np.array(mod.coefs_per_epoch)[converged:,0])
ax1.set_title(mod.linear_feats[0])
ax2.hist(np.array(mod.coefs_per_epoch)[converged:,1])
ax2.set_title(mod.linear_feats[1])
ax3.hist(np.array(mod.coefs_per_epoch)[converged:,2])
ax3.set_xlabel(mod.linear_feats[2])
ax4.hist(mod.rmse_[converged:])
ax4.set_xlabel("RMSE")
plt.show()
#Testing Parameter Convergence
for i in range(len(mod.linear_feats)):
parameter_chain=np.array(mod.coefs_per_epoch)[:,i]
column=mod.linear_feats[i]
X1= parameter_chain[int(mod.epochs_completed_*.5):int(mod.epochs_completed_*.75)]
X2= parameter_chain[int(mod.epochs_completed_*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
print(column+" Converged: ",~(absT>t.ppf(1-.05/2, v)))
#Testing Parameter Convergence
parameter_chain=mod.rmse_
X1= parameter_chain[int(mod.epochs_completed_*.5):int(mod.epochs_completed_*.75)]
X2= parameter_chain[int(mod.epochs_completed_*.75):]
v=len(X1)+len(X2)-2
T=(np.mean(X1)-np.mean(X2))/((np.var(X1)/len(X1)+np.var(X2)/len(X2))**.5)
absT=abs(T)
print("RMSE Converged: ",~(absT>t.ppf(1-.05/2, v)))
##Regression
coef=np.linalg.pinv(xt.T@xt)@(xt.T@yt)
yfit_regression=xt@coef
ypred_regression=xv@coef
coef_df=pd.DataFrame(
{'feat':xt.columns,
'coef':coef}
)
round(coef_df,2).to_csv("coef_df.csv")
r2_regression=1-sum((yv-ypred_regression)**2)/sum((yv-np.mean(yv))**2)
rmse_regression=np.mean((yv-ypred_regression)**2)**.5
print("Regression R2,RSME: ",round(r2_regression,3),round(rmse_regression,2))
##XGB
xgb = mod.mod.fit(xt,yt)
ypred_xgb=pd.Series(xgb.predict(xv),index=yv.index)
r2_xgb=1-sum((yv-ypred_xgb)**2)/sum((yv-np.mean(yv))**2)
rmse_xgb=np.mean((yv-ypred_xgb)**2)**.5
print("XGB R2,RSME: ",round(r2_xgb,3),round(rmse_xgb,2))
"""
Testing high multicollinearity behavior. XGB is robust to this, regression is not.
Two highly correlated features are CYL and DISP.
"""
print('Looking at multicollinearity:')
#Multicollinearity
mod=mixed_model(
mod=XGBRegressor(n_estimators=25,
max_depth=6,
random_state=42),
lr=3,
epoch=3000,
optimization='Newtonian'
)
mod.fit(xt,yt,
linear_feats=['weight',
'disp',
'cyl','yr',
'int']
)
ypred=mod.predict(xv)
r2=1-sum((yv-ypred)**2)/sum((yv-np.mean(yv))**2)
rmse=np.mean((yv-ypred)**2)**.5
for i in range(len(mod.linear_feats)):
plt.plot(np.array(mod.coefs_per_epoch)[:,i])
plt.title("Coefficient of "+mod.linear_feats[i])
plt.xlabel("Epoch")
plt.show()
plt.plot(mod.rmse_)
plt.title("Training RMSE")
plt.xlabel("Epoch")
plt.show()
print("mixed model R2,RSME: ",round(r2,3),round(rmse,2))
mm_pr_coefs = | pd.DataFrame(mod.coef_means,columns=['Coef']) | pandas.DataFrame |
from textblob import TextBlob
import GetOldTweets3 as got
# Used in this program
import pandas as pd
from datetime import date, timedelta
import glob
from yahoo_historical import Fetcher
import numpy as np
# Used in this program
# DASK
import dask as dask
from dask.distributed import Client, progress
import dask.dataframe as dd
client = Client()
client
from dask import delayed
"""
#1
----- Initial information -----
"""
#datos2 = pd.read_csv('tweets/1.5_years_26marzo/2019/2018-01-07_3tweets.csv',index_col=0)
sources = ['eleconomista', 'ElFinanciero_Mx','El_Universal_Mx']
keywords = ['america movil','banco de mexico', 'mexico', 'bmv', 'bolsa mexicana de valores', 'bolsa mexicana',
'ipc', 'gobierno de mexico',
'walmex','femsa','televisa', 'grupo mexico','banorte','cemex','grupo alfa',
'peñoles', 'inbursa', 'elektra', 'mexichem', 'bimbo', 'arca continental', 'kimberly-clark',
'genomma lab', 'puerto de liverpool', 'grupo aeroportuario', 'banco compartamos', 'alpek', 'ica',
'tv azteca', 'ohl', 'maseca', 'alsea', 'carso', 'lala', 'banregio', 'comercial mexicana',
'ienova', 'pinfra', 'santander mexico', 'presidente de mexico','cetes']
# - First, gather all the .csv files inside the 2019 folder into frame
path = 'tweets/1.5_years_26marzo/juntos' # use your path
all_files = glob.glob(path + "/*.csv")
li = []
for filename in all_files:
df = pd.read_csv(filename, index_col=None, header=0, )
li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True)
# - List all the dates for which tweets were supposedly obtained and compare
# against what was actually downloaded to find the missing dates
d1 = date(2016, 1, 2) # start date, check that it matches quandl below
d2 = date(2019, 3, 26) # end date, check that it matches quandl below
delta = d2 - d1 # timedelta
dates=[]
for i in range(delta.days + 1):
j = str(d1 + timedelta(i))
dates.append(j)
fechas_real = list(set(frame['Date']))
fechas_faltantes = list(set(dates) - set(fechas_real))
# Transform the DataFrames to collect the information I need
## Attempt 1: Count negative/positive tweets per source (3)
por_fuente = pd.DataFrame()
for fuente in sources:
filter_col = [col for col in frame if col.startswith(fuente)]
filter_col.append('Date')
fr_int1 = frame[filter_col]
globals()['pos_%s' % fuente] = []
globals()['neg_%s' % fuente] = []
globals()['neu_%s' % fuente] = []
for fecha in fechas_real:
dframe = fr_int1.loc[fr_int1['Date'] == fecha]
positivo = dframe[dframe[filter_col] == 1].count().sum()
negativo = dframe[dframe[filter_col] == 2].count().sum()
neutro = dframe[dframe[filter_col] == 0].count().sum()
globals()['pos_%s' % fuente].append(positivo)
globals()['neg_%s' % fuente].append(negativo)
globals()['neu_%s' % fuente].append(neutro)
por_fuente[str(fuente+'_positivos')] = globals()['pos_%s' % fuente]
por_fuente[str(fuente+'_negativos')] = globals()['neg_%s' % fuente]
por_fuente[str(fuente+'_neutros')] = globals()['neu_%s' % fuente]
por_fuente['Date'] = fechas_real
por_fuente = por_fuente.set_index('Date')
## Attempt 2: Count negative/positive tweets per topic (41)
por_tema = | pd.DataFrame() | pandas.DataFrame |
"""
Code to merge several datasets to get full table(s) to be transformed for ML
Reads data from the team's S3 bucket and merges them all together
This .py file simply merges the FIRMS data with WFIGS data.
FIRMS (Fire Information Resource Management System) exports of satellite fire detections can be obtained at the following link:
https://firms.modaps.eosdis.nasa.gov/download/
WFIGS (Wildland Fire Interagency Geospatial Services) data can be found at the following link:
https://data-nifc.opendata.arcgis.com/search?tags=Category%2Chistoric_wildlandfire_opendata
For team:
This version code was used to create FIRMSandSCANFull2018toApr2022.csv
BA_
"""
#general imports
import pandas as pd
import boto3
import geopy.distance
import numpy as np
"""
Pull from S3 and concat similar data to start.
"""
#TODO For Team: enter the credentials below to run
S3_Key_id=''
S3_Secret_key=''
def pull_data(Key_id, Secret_key, file):
"""
Function which CJ wrote to pull data from S3
"""
BUCKET_NAME = "gtown-wildfire-ds"
OBJECT_KEY = file
client = boto3.client(
's3',
aws_access_key_id= Key_id,
aws_secret_access_key= Secret_key)
obj = client.get_object(Bucket= BUCKET_NAME, Key= OBJECT_KEY)
file_df = pd.read_csv(obj['Body'])
return (file_df)
#read the csvs from the S3 using the pull_data function
print('Pulling data from S3 into dataframes...')
file = 'fire_archive_M-C61_268391.csv'
df_modis1 = pull_data(S3_Key_id, S3_Secret_key, file) #get MODIS data 1
file = 'fire_nrt_M-C61_268391.csv'
df_modis2 = pull_data(S3_Key_id, S3_Secret_key, file) #get MODIS data 2
file = 'fire_nrt_J1V-C2_268392.csv'
df_viirs = pull_data(S3_Key_id, S3_Secret_key, file) #get VIIRS data
file = 'USDAJan2018ToMar2022.csv'
df_usda = pull_data(S3_Key_id, S3_Secret_key, file) #get USDA data
file = 'WFIGS_Pulled5-5-2022.csv'
df_wfigs = pull_data(S3_Key_id, S3_Secret_key, file) #get WFIGS (small) data table
file = 'WFIGS_big_Pulled5-8-2022.csv'
df_wfigsbig = pull_data(S3_Key_id, S3_Secret_key, file) #get WFIGS (big) data table
print('Data pulled from S3 into dataframes')
#concatenate all of the FIRMS data
df_FIRMS = | pd.concat([df_modis1, df_modis2, df_viirs]) | pandas.concat |
__author__ = "<NAME>, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__license__ = "MIT"
import matplotlib.pyplot as plt
import numpy as np
import pandas
import pandas as pd
from matplotlib.ticker import NullFormatter
from idf_analysis import IntensityDurationFrequencyAnalyse
from idf_analysis.definitions import COL
from idf_analysis.little_helpers import duration_steps_readable, minutes_readable, frame_looper, event_caption
from idf_analysis.sww_utils import (guess_freq, rain_events, event_duration, resample_rain_series, rain_bar_plot,
agg_events, )
COL.MAX_SRI = 'max_SRI_{}'
COL.MAX_SRI_DURATION = 'max_SRI_duration_{}'
####################################################################################################################
def grisa_factor(tn):
"""
calculates the grisa-factor according to Grisa's formula
Args:
tn (float): in [years]
Returns:
float: factor
"""
return 1 + (np.log(tn) / np.log(2))
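# Worked examples (added for illustration): the factor grows by one each time tn doubles,
# e.g. grisa_factor(1) == 1.0, grisa_factor(2) == 2.0 and grisa_factor(8) == 4.0.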
def next_bigger(v, l):
return l[next(x for x, val in enumerate(l) if val >= v)]
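# Example (added): with the Schmitt return periods used below,
# next_bigger(15, [1, 2, 3, 5, 10, 20, 25, 30, 50, 75, 100]) returns 20.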
class SCHMITT:
    # Assignment of the SRI to the return period according to Schmitt
SRI_TN = {
1: 1,
2: 1,
3: 2,
5: 2,
10: 3,
20: 4,
25: 4,
30: 5,
50: 6,
75: 6,
100: 7
}
    # Scaling factors according to Schmitt for SRI 8,9,10,11,12 based on SRI 7
    # lower and upper bounds
MULTI_FACTOR = {
8: (1.2, 1.39),
9: (1.4, 1.59),
10: (1.6, 2.19),
11: (2.2, 2.78),
12: (2.8, 2.8),
}
VERBAL = {
(1, 2): 'Starkregen',
(3, 5): 'intensiver Starkregen',
(6, 7): 'außergewöhnlicher Starkregen',
(8, 12): 'extremer Starkregen'
}
INDICES_COLOR = {1: (0.69, 0.9, 0.1),
2: (0.8, 1, 0.6),
3: (0.9, 1, 0.3),
4: (1, 0.96, 0),
5: (1, 0.63, 0),
6: (1, 0.34, 0),
7: (1, 0.16, 0),
8: (0.97, 0.12, 0.24),
9: (1, 0.10, 0.39),
10: (0.97, 0.03, 0.51),
11: (0.92, 0.08, 0.75),
12: (0.66, 0.11, 0.86)}
INDICES_COLOR_RGB = {1: (176, 230, 25),
2: (204, 255, 153),
3: (230, 255, 77),
4: (255, 244, 0),
5: (255, 160, 0),
6: (255, 86, 0),
7: (255, 40, 0),
8: (247, 30, 61),
9: (255, 26, 99),
10: (247, 9, 130),
11: (235, 21, 191),
12: (189, 28, 220)}
INDICES_COLOR_HEX = {1: "#b0e619",
2: "#ccff99",
3: "#e6ff4d",
4: "#fff400",
5: "#ffa000",
6: "#ff5600",
7: "#ff2800",
8: "#f71e3d",
9: "#ff1a63",
10: "#f70982",
11: "#eb15bf",
12: "#bd1cdc"}
krueger_pfister_verbal = {
(1, 4): 'moderat',
(5, 7): 'stark',
(8, 10): 'heftig',
(11, 12): 'extrem'
}
grisa_verbal = {
(1, 2): 'Minor',
(3, 4): 'Moderate',
(5, 6): 'Major',
(7, 8): 'Extreme',
(9, 10): 'Catastrophic'
}
def cat_dict(cat):
res = {}
for num_range, verbal in cat.items():
for i in range(num_range[0], num_range[1]+1):
res[i] = verbal
return res
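# Example (added): applied to SCHMITT.VERBAL, cat_dict expands the range keys to single
# indices, e.g. 1 and 2 both map to 'Starkregen' and 8 through 12 to 'extremer Starkregen'.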
####################################################################################################################
class HeavyRainfallIndexAnalyse(IntensityDurationFrequencyAnalyse):
indices = list(range(1, 13))
class METHODS:
SCHMITT = 'Schmitt'
KRUEGER_PFISTER = 'KruegerPfister'
MUDERSBACH = 'Mudersbach'
@classmethod
def all(cls):
return cls.SCHMITT, cls.KRUEGER_PFISTER, cls.MUDERSBACH
indices_color = SCHMITT.INDICES_COLOR
def __init__(self, *args, method=METHODS.SCHMITT, **kwargs):
IntensityDurationFrequencyAnalyse.__init__(self, *args, **kwargs)
self.method = method
self._sri_frame = None
def set_series(self, series):
IntensityDurationFrequencyAnalyse.set_series(self, series)
self._sri_frame = None
def get_sri(self, height_of_rainfall, duration):
"""
calculate the heavy rain index (StarkRegenIndex), when the height of rainfall and the duration are given
Args:
height_of_rainfall (float): in [mm]
duration (int | float | list | numpy.ndarray | pandas.Series): in minutes
Returns:
int | float | list | numpy.ndarray | pandas.Series: heavy rain index
"""
tn = self.get_return_period(height_of_rainfall, duration)
if self.method == self.METHODS.MUDERSBACH:
if isinstance(tn, (pd.Series, np.ndarray)):
sri = np.round(1.5 * np.log(tn) + 0.4 * np.log(duration), 0)
sri[tn <= 1] = 1
sri[tn >= 100] = 12
return sri
else:
if tn <= 1:
return 1
elif tn >= 100:
return 12
else:
return np.round(1.5 * np.log(tn) + 0.4 * np.log(duration), 0)
elif self.method == self.METHODS.SCHMITT:
if isinstance(tn, (pd.Series, np.ndarray)):
breaks = [-np.inf] + list(SCHMITT.SRI_TN.keys()) + [np.inf]
d = dict(zip(range(11), SCHMITT.SRI_TN.values()))
sri = pd.cut(tn, breaks, labels=False).replace(d)
over_100 = tn > 100
hn_100 = self.depth_of_rainfall(duration, 100)
breaks2 = [1] + [f[0] for f in SCHMITT.MULTI_FACTOR.values()][1:] + [np.inf]
d2 = dict(zip(range(len(breaks2) - 1), range(8, 13)))
sri.loc[over_100] = pd.cut(height_of_rainfall.loc[over_100] / hn_100, breaks2, labels=False).replace(d2)
else:
if tn >= 100:
hn_100 = self.depth_of_rainfall(duration, 100)
for sri, mul in SCHMITT.MULTI_FACTOR.items():
if height_of_rainfall <= hn_100 * mul[0]:
break
else:
sri = SCHMITT.SRI_TN[next_bigger(tn, list(SCHMITT.SRI_TN.keys()))]
elif self.method == self.METHODS.KRUEGER_PFISTER:
h_24h = self.depth_of_rainfall(duration=24 * 60, return_period=tn)
hn_100 = self.depth_of_rainfall(duration=duration, return_period=100)
duration_adjustment_factor = height_of_rainfall / h_24h
intensity_adjustment_factor = height_of_rainfall / hn_100
sri = grisa_factor(tn) * duration_adjustment_factor * intensity_adjustment_factor
if isinstance(sri, (pd.Series, np.ndarray)):
sri[tn < 0.5] = 0
else:
if tn < 0.5:
return 0
return np.clip(np.ceil(sri), 0, 12)
else:
raise NotImplementedError(f'Method {self.method} not implemented!')
return sri
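    # Worked example (added for illustration) of the Mudersbach branch above:
    # tn = 10 a and duration = 60 min give 1.5*ln(10) + 0.4*ln(60) ≈ 3.45 + 1.64 = 5.09,
    # which rounds to an SRI of 5.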
# __________________________________________________________________________________________________________________
def result_sri_table(self, durations=None):
"""
        get a table of rainfall depth with SRI classes as columns and durations as rows
Args:
durations (list | numpy.ndarray | None): list of durations in minutes for the table
Returns:
            pandas.DataFrame: SRI table
"""
idf_table = self.result_table(durations)
if self.method == self.METHODS.SCHMITT:
sri_table = idf_table.rename(columns=SCHMITT.SRI_TN)
for sri, mul in SCHMITT.MULTI_FACTOR.items():
sri_table[sri] = mul[1] * sri_table[7]
sri_table = sri_table.loc[:, ~sri_table.columns.duplicated('last')]
elif self.method == self.METHODS.MUDERSBACH:
            # first build a table of the return periods
rp_table = pd.DataFrame(index=idf_table.index, columns=range(1, 13))
            # dependence on the duration step
a = np.log(rp_table.index.values) * 0.4
for sri in rp_table.columns:
rp_table[sri] = np.exp((sri + 0.5 - a) / 1.5)
rp_table.loc[:, 1] = 1
# rp_table.loc[:, 12] = 100
            # then compute the rainfall depth from duration step and return period
sri_table = rp_table.round(1).copy()
for dur in rp_table.index:
sri_table.loc[dur] = self.depth_of_rainfall(dur, rp_table.loc[dur])
            # extrapolation beyond this is probably not very sound
sri_table[rp_table >= 100] = np.NaN
# sri_table.loc[:12] = self.depth_of_rainfall(sri_table.index.values, 100)
sri_table[rp_table < 1] = np.NaN
sri_table = sri_table.astype(float).round(2)
sri_table = sri_table.fillna(method='ffill', axis=1, limit=None)
elif self.method == self.METHODS.KRUEGER_PFISTER:
# duration_adjustment_factor = idf_table.div(idf_table.loc[24 * 60])
# intensity_adjustment_factor = idf_table.div(idf_table[100].values, axis=0)
# sri_table = grisa_factor(
# idf_table.columns.values) * duration_adjustment_factor * intensity_adjustment_factor
# sri_table = sri_table.round().astype(int).clip(0,12)
sri_table = | pd.DataFrame(index=idf_table.index) | pandas.DataFrame |
import numpy as np
import os.path
import pandas as pd
import sys
import math
# find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
# print(sys.path)
# print(os.path)
class LeslieProbitInputs(ModelSharedInputs):
"""
Input class for LeslieProbit.
"""
def __init__(self):
"""Class representing the inputs for LeslieProbit"""
super(LeslieProbitInputs, self).__init__()
# self.a_n = pd.Series([], dtype="object")
# self.c_n = pd.Series([], dtype="object")
self.grass_type = pd.Series([], dtype="object")
self.percent_active_ingredient = pd.Series([], dtype="float")
self.foliar_half_life = pd.Series([], dtype="float")
self.sol = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
self.number_applications = pd.Series([], dtype="float")
self.application_rates = pd.Series([], dtype="float")
self.application_days = pd.Series([], dtype="float")
self.b = pd.Series([], dtype="float")
self.test_species = pd.Series([], dtype="object")
self.ld50_test = pd.Series([], dtype="float")
# self.bw_tested = pd.Series([], dtype="float")
# self.ass_species = pd.Series([], dtype="object")
# self.bw_ass = pd.Series([], dtype="float")
self.mineau_scaling_factor = pd.Series([], dtype="float")
self.probit_gamma = pd.Series([], dtype="float")
self.init_pop_size = pd.Series([], dtype="float")
self.stages = | pd.Series([], dtype="float") | pandas.Series |
from os import path
import pandas as pd
import numpy as np
import dateparser
import datetime as dt
from enum import Enum
'''Tools to import both endogeneous and exogeneous data based on known file types.
For each type of data (endogeneous and exogeneous) there is an importer class containing
a single factory method to import data of a particular format. Valid formats are
contained in an enum and their specifications can be found in the Readme for this package.
Typical usage examples:
foo = EndogeneousDataImporter.import_endogeneous(filename, endogeneous_var, EndogeneousDataFormats.format_name)
bar = ExogeneousDataImporter.import_exogeneous(filename, ExogeneousDataFormats.format_name)
'''
class ImportError(Exception):
'''
Errors relating to importing datasets from known filetypes.
'''
class EndogeneousDataFormats(Enum):
'''
Enumerates allowed file formats for endogeneous data import.
'''
cmpc_financial_data = 'cmpc_financial_data'
excel_table = 'excel_table'
class ExogeneousDataFormats(Enum):
'''
Enumerates allowed file formats for exogeneous data import.
'''
macro = 'macro'
class EndogeneousDataImporter:
'''Import endogeneous data'''
def import_endogeneous(file, endogeneous_var : str, format : EndogeneousDataFormats) -> pd.Series:
'''
Factory method for importing time indexed endogeneous data.
        Keyword arguments:
file -- file-like or file path; the endogeneous data.
            Requires that the file conforms to the format as specified in the Readme.
endogeneous_var -- The name of the endogeneous var.
format -- The format of the endogeneous data.
Returns:
A series of endogeneous_var of type float64, indexed by period and sorted in increasing date order,
where no entry is null, no period is duplicated, and no intermediate period is missing.
'''
if format == EndogeneousDataFormats.cmpc_financial_data:
func = import_cmpc_financial_data
elif format == EndogeneousDataFormats.excel_table:
func = import_excel_table
else:
raise ValueError('Endogeneous data format not recognized')
try:
data = func(file, endogeneous_var)
except:
raise ImportError('Error importing file. Check file format, endogeneous_var name or format selection.')
EndogeneousDataImporter._check_import(data)
return data
def _check_import(data : pd.Series):
'''Asserts the postcondition of import_endogeneous.'''
# if shape is len 1, 1-dimensional array, only 1 col
assert len(data.shape) == 1
assert not np.any(data.isnull())
# TODO refactor in utility method to DRY
assert not np.any(data.index.duplicated())
# Every quarterly observation between max and min filled
# if length 0 index, then satisfied since no max or min
# can't test since .max() of empty index is pd.NaT
if not len(data.index) == 0:
time_delta = data.index.max() - data.index.min()
# should be a +1 on time delta since we have 40 periods, but observation at beginning of first period
# example: quarter 1-quarter 0 =1, although we have 2 quarters
assert len(data.index) == time_delta.n +1
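# Usage sketch (added; the file name and column below are hypothetical):
#   revenue = EndogeneousDataImporter.import_endogeneous(
#       'cmpc_financials.csv', 'Revenue', EndogeneousDataFormats.cmpc_financial_data)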
def import_excel_table(file, endogeneous_var : str) -> pd.Series:
'''
    Converts an excel table to a time-indexed Series.
See spec for EndogeneousDataImporter.import_endogeneous()
'''
# import macro vars
endog_db = pd.read_excel(file, parse_dates=False)
# endog_db = endog_db.rename(columns = {"Unnamed: 0": "Year"})
# print(endog_db)
# Melt defaults to using all columns for unpivot
endog_db = endog_db.melt(id_vars=['Year'], var_name='Month', value_name=endogeneous_var)
endog_db = endog_db.astype({'Year' : 'str', 'Month' : 'str'})
endog_db.loc[:,'Month'] = endog_db.loc[:, 'Month'].str.upper()
endog_db.loc[:, 'Date'] = endog_db.loc[:,'Year'] + endog_db.loc[:,'Month']
# could parse period instead in future version
# TODO is last the right thing to do?
endog_db.loc[:, 'Date'] = endog_db.loc[:, 'Date'].apply(lambda x: dateparser.parse(x, settings={'PREFER_DAY_OF_MONTH' : 'last'}))
endog_db = endog_db.set_index("Date")
endog_db.index = pd.to_datetime(endog_db.index)
endog_db = endog_db.loc[:,endogeneous_var]
endog_db = endog_db.dropna()
endog_db.index = endog_db.index.to_period('M')
endog_db = endog_db.astype('float64')
endog_db = endog_db.sort_index()
return endog_db
def parse_quarter_to_period(quarter : str) -> pd.Period:
'''
Converts a string of the form QX YY to period.
Keyword arguments:
quarter -- String of the format 'QX YY' representing the period quarter X of the most
recently ocurring year ending in YY.
Returns:
The corresponding period.
'''
century = 100 #years
quarter, year = quarter.split()
current_year = int(dt.datetime.today().year)
current_century = (current_year // century) * century
if int(year) + current_century > current_year:
four_digit_year = current_century -century + int(year)
else:
four_digit_year = current_century + int(year)
quarter_num = int(quarter[1])
assert 1 <= quarter_num <= 4
return pd.Period(freq = 'Q', quarter=quarter_num, year=four_digit_year)
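# Examples (added; the century resolution depends on the year the code is run, assumed
# here to fall between 2019 and 2099):
# parse_quarter_to_period('Q3 19') -> Period('2019Q3', 'Q')
# parse_quarter_to_period('Q1 99') -> Period('1999Q1', 'Q')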
def import_cmpc_financial_data(file, endogeneous_var : str) -> pd.Series:
'''
    Converts a csv CMPC Financial Data dataset to a Series. Designed to work with csv files created
from the XLSX format available here:
http://apps.indigotools.com/IR/IAC/?Ticker=CMPC&Exchange=SANTIAGO,
although an individual csv sheet must be made, and negative numbers must be formatted without parentheses or commas.
Example: -123456.78 not (123,456.78)
See spec for EndogeneousDataImporter.import_endogenous()
'''
assert endogeneous_var != ''
financial_statement_db = pd.read_csv(file,
parse_dates=True,
na_values=['', '-', ' '], # endogeneous can't be '' due to treatment as null
skip_blank_lines=True) # many blank rows in CMPC download
# Massage data into correct format
# Description is anchor point for col containing endogeneous and row containing quarters
# index is now vars, including endogeneous, and columns are now quarters plus other columns (ex. 'Currency', 'Unit')
financial_statement_db = financial_statement_db.set_index('Description')
# filter columns based on regex
    financial_statement_db = financial_statement_db.filter(regex=r'Q[1-4]\s\d\d')
# Columns are now vars, including endogeneous, rows are quarters
financial_statement_db = financial_statement_db.transpose()
# Set index
financial_statement_db.index = financial_statement_db.index.map(parse_quarter_to_period)
financial_statement_db.index = pd.PeriodIndex(financial_statement_db.index, freq='Q')
# Drop vars other than endogeneous
financial_statement_db = financial_statement_db.loc[:,endogeneous_var]
financial_statement_db = financial_statement_db.fillna(0.0)
financial_statement_db = financial_statement_db.astype('float64')
# sort index by time
financial_statement_db = financial_statement_db.sort_index()
return financial_statement_db
class ExogeneousDataImporter:
'''Import exogeneous data'''
def import_exogeneous(file, format : ExogeneousDataFormats) -> pd.DataFrame:
'''
Factory method to import time indexed exogeneous data.
Keyword arguments:
        file -- file-like or path; the exogeneous data.
Requires that the file conforms to format according to the spec for the format found in the Readme.
format -- The name of the file format.
Returns:
A Pandas DataFrame indexed by date where each column contains a unique exogeneous variabe in float64 format.
The dataframe contains no null values. Nulls in the imported file will be treated as follows:
- Nulls occurring prior to existing data will be backfilled.
- Any nulls between the last date in the index and the most recent observation of that variable will
be forward filled.
- Exogeneous variables with no corresponding data (all nulls) will be dropped.
Data will be sorted in ascending order by date.
'''
if format == ExogeneousDataFormats.macro:
func = import_macro
else:
raise ValueError("Exogneous variable format not recognized.")
try:
data = func(file)
except:
            raise ImportError('Error importing file. Check the file format or format selection.')
ExogeneousDataImporter._check_import(data)
return data
def _check_import(data : pd.DataFrame):
'''Asserts the postcondition on import_exogeneous.'''
# assert no null values after bfill and ffill
assert not np.any(data.isnull())
def import_macro(file) -> pd.DataFrame:
'''
Converts an XLSX macroeconomic dataset to a DataFrame.
See spec for factory method ExogeneousDataImporter.import_exogeneous()
'''
# import vars
exog_db = pd.read_excel(file, sheet_name="BBG P", skiprows=[0,1,2,4,5], index_col = 0, parse_dates=True)
# change empty string col name back to empty string
exog_db = exog_db.rename(columns = {"Unnamed: 1": ""})
# should only have one empty string col name, so if there exists unnamed 1 precondition of unique columns not met
assert not 'Unnamed: 2' in exog_db.columns, 'Only one column label may be empty string'
# format index
# drop empty index entries
exog_db = exog_db[exog_db.index.notnull()]
exog_db.index = | pd.to_datetime(exog_db.index) | pandas.to_datetime |
import io
import os
import time
import re
import string
from PIL import Image, ImageFilter
import requests
import numpy as np
import pandas as pd
from scipy.fftpack import fft
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# import sklearn.metrics as sm
from keras.preprocessing import image
from keras.applications.inception_v3 \
import decode_predictions, preprocess_input
from keras.applications.inception_v3 import InceptionV3
class Unicorn():
def __init__(self, weights_path):
self.cnn_features = True
self.target_size = (299, 299)
self.alpha_fill = '#ffffff'
self.prep_func = preprocess_input
self.scale_features = True
self.n_clusters = 4
self.n_pca_comps = 10
self.model = InceptionV3(weights=weights_path)
def load_image(self, img_path):
''' load image given path and convert to an array
'''
img = image.load_img(img_path, target_size=self.target_size)
x = image.img_to_array(img)
return self.prep_func(x)
def load_image_from_web(self, image_url):
''' load an image from a provided hyperlink
'''
# get image
response = requests.get(image_url)
with Image.open(io.BytesIO(response.content)) as img:
# fill transparency if needed
if img.mode in ('RGBA', 'LA'):
img = self.strip_alpha_channel(img)
# convert to jpeg
            if img.format != 'jpeg':
img = img.convert('RGB')
img.save('target_img.jpg')
def validate_url(self, url):
''' takes input string and returns True if string is
a url.
'''
url_validator = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return bool(url_validator.match(url))
def featurize_image(self, image_array):
''' Returns binary array with ones where the model predicts that
the image contains an instance of one of the target classes
(specified by wordnet id)
'''
predictions = self.model.predict(image_array)
return predictions
def strip_alpha_channel(self, image):
''' Strip the alpha channel of an image and fill with fill color
'''
background = Image.new(image.mode[:-1], image.size, self.alpha_fill)
background.paste(image, image.split()[-1])
return background
def fft_images(self, image_paths):
''' Returns the fft transform of images from paths provided as a list
'''
num_images = len(image_paths)
feature_data = pd.DataFrame()
for i, image_path in enumerate(image_paths):
try:
if self.validate_url(image_path):
filename = 'target_img.jpg'
self.load_image_from_web(image_path)
else:
filename = image_path
if i % 10 == 0:
print('processing image {}/{}'.format(i + 1, num_images))
X = np.array([self.load_image(filename)])
# # # flatten and apply fft
image_features = fft(X.flatten())
if filename == 'target_img.jpg':
os.remove('target_img.jpg')
feature_data = feature_data.append(
| pd.Series(image_features) | pandas.Series |
import pandas as pd
import numpy as np
import os
os.chdir ('Data')
#2016 data
df2016 = pd.read_csv('2016.CSV', low_memory=False)
df2016.columns = [
'year',
'respondent',
'agency',
'loan_type',
'property_type',
'loan_purpose',
'occupancy',
'loan_amount',
'preapproval',
'action_type',
'msa_md',
'state_code',
'county_code',
'census_tract_number',
'applicant_ethnicity',
'co_applicant_ethnicity',
'applicant_race_1',
'applicant_race_2',
'applicant_race_3',
'applicant_race_4',
'applicant_race_5',
'co_applicant_race_1',
'co_applicant_race_2',
'co_applicant_race_3',
'co_applicant_race_4',
'co_applicant_race_5',
'applicant_sex',
'co_applicant_sex',
'applicant_income',
'purchaser_type',
'denial_reason_1',
'denial_reason_2',
'denial_reason_3',
'rate_spread',
'hoepa_status',
'lien_status',
'edit_status',
'sequence_number',
'population',
'minority_population',
'hud_median_family_income',
'tract_to_msa',
'number_of_owner_occupied_units',
'number_of_family_units',
'application_date_indicator']
institutions2016 = pd.read_csv('2016Institutions.csv', low_memory=False, encoding = 'latin1')
institutions2016.columns = ['drop', 'respondent', 'agency', 'panel_name', 'transmittal_name', 'lar_count']
dic2016 = dict(zip(institutions2016.respondent, institutions2016.panel_name))
df2016['panel_name'] = df2016['respondent'].map(dic2016)
#2015 data
df2015 = pd.read_csv('2015.CSV', low_memory=False)
df2015.columns = [
'year',
'respondent',
'agency',
'loan_type',
'property_type',
'loan_purpose',
'occupancy',
'loan_amount',
'preapproval',
'action_type',
'msa_md',
'state_code',
'county_code',
'census_tract_number',
'applicant_ethnicity',
'co_applicant_ethnicity',
'applicant_race_1',
'applicant_race_2',
'applicant_race_3',
'applicant_race_4',
'applicant_race_5',
'co_applicant_race_1',
'co_applicant_race_2',
'co_applicant_race_3',
'co_applicant_race_4',
'co_applicant_race_5',
'applicant_sex',
'co_applicant_sex',
'applicant_income',
'purchaser_type',
'denial_reason_1',
'denial_reason_2',
'denial_reason_3',
'rate_spread',
'hoepa_status',
'lien_status',
'edit_status',
'sequence_number',
'population',
'minority_population',
'hud_median_family_income',
'tract_to_msa',
'number_of_owner_occupied_units',
'number_of_family_units',
'application_date_indicator']
institutions2015 = pd.read_csv('2015Institutions.csv', low_memory=False, encoding = 'latin1')
institutions2015.columns = ['drop', 'respondent', 'agency', 'panel_name', 'transmittal_name', 'lar_count']
dic2015 = dict(zip(institutions2015.respondent, institutions2015.panel_name))
df2015['panel_name'] = df2015['respondent'].map(dic2015)
#2014 data
df2014 = pd.read_csv('2014.CSV', low_memory=False)
df2014.columns = [
'year',
'respondent',
'agency',
'loan_type',
'property_type',
'loan_purpose',
'occupancy',
'loan_amount',
'preapproval',
'action_type',
'msa_md',
'state_code',
'county_code',
'census_tract_number',
'applicant_ethnicity',
'co_applicant_ethnicity',
'applicant_race_1',
'applicant_race_2',
'applicant_race_3',
'applicant_race_4',
'applicant_race_5',
'co_applicant_race_1',
'co_applicant_race_2',
'co_applicant_race_3',
'co_applicant_race_4',
'co_applicant_race_5',
'applicant_sex',
'co_applicant_sex',
'applicant_income',
'purchaser_type',
'denial_reason_1',
'denial_reason_2',
'denial_reason_3',
'rate_spread',
'hoepa_status',
'lien_status',
'edit_status',
'sequence_number',
'population',
'minority_population',
'hud_median_family_income',
'tract_to_msa',
'number_of_owner_occupied_units',
'number_of_family_units',
'application_date_indicator']
institutions2014 = pd.read_csv('2014Institutions.csv', low_memory=False, encoding = 'latin1')
institutions2014.columns = ['drop', 'respondent', 'agency', 'panel_name', 'transmittal_name', 'lar_count']
dic2014 = dict(zip(institutions2014.respondent, institutions2014.panel_name))
df2014['panel_name'] = df2014['respondent'].map(dic2014)
#combines and exports to a csv
frames = [df2014, df2015, df2016]
df = | pd.concat(frames) | pandas.concat |
# Function 0
def cleaning_func_0(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['90day_worse_rating'] = np.where(loan['mths_since_last_major_derog'].isnull(), 0, 1)
return loan
#=============
# Function 1
def cleaning_func_1(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['revol_util'] = loan['revol_util'].fillna(loan['revol_util'].median())
return loan
#=============
# Function 2
def cleaning_func_2(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['emp_title'] = np.where(loan['emp_title'].isnull(), 'Job title not given', loan['emp_title'])
return loan
#=============
# Function 3
def cleaning_func_3(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['acc_now_delinq'] = np.where(loan['acc_now_delinq'].isnull(), 0, loan['acc_now_delinq'])
return loan
#=============
# Function 4
def cleaning_func_4(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['delinq_2yrs'] = np.where(loan['delinq_2yrs'].isnull(), 0, loan['delinq_2yrs'])
return loan
#=============
# Function 5
def cleaning_func_5(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_coll_amt'] = loan['tot_coll_amt'].fillna(loan['tot_coll_amt'].median())
return loan
#=============
# Function 6
def cleaning_func_6(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['title'] = np.where(loan['title'].isnull(), 0, loan['title'])
return loan
#=============
# Function 7
def cleaning_func_7(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_rev_hi_lim'] = loan['total_rev_hi_lim'].fillna(loan['total_rev_hi_lim'].median())
return loan
#=============
# Function 8
def cleaning_func_8(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['inq_last_6mths'] = np.where(loan['inq_last_6mths'].isnull(), 0, loan['inq_last_6mths'])
return loan
#=============
# Function 9
def cleaning_func_9(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['total_acc'] = np.where(loan['total_acc'].isnull(), 0, loan['total_acc'])
return loan
#=============
# Function 10
def cleaning_func_10(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['annual_inc'] = loan['annual_inc'].fillna(loan['annual_inc'].median())
return loan
#=============
# Function 11
def cleaning_func_11(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['open_acc'] = np.where(loan['open_acc'].isnull(), 0, loan['open_acc'])
return loan
#=============
# Function 12
def cleaning_func_12(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['collections_12_mths_ex_med'] = np.where(loan['collections_12_mths_ex_med'].isnull(), 0, loan['collections_12_mths_ex_med'])
return loan
#=============
# Function 13
def cleaning_func_13(loan):
# core cleaning code
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['tot_cur_bal'] = loan['tot_cur_bal'].fillna(loan['tot_cur_bal'].median())
return loan
#=============
# Function 14
def cleaning_func_14(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['pub_rec'] = np.where(loan['pub_rec'].isnull(), 0, loan['pub_rec'])
return loan
#=============
# Function 15
def cleaning_func_15(loan):
# core cleaning code
import numpy as np
import pandas as pd
# loan = pd.read_csv('../input/loan.csv', low_memory=False)
loan['mths_since_last_delinq'] = np.where(loan['mths_since_last_delinq'].isnull(), 188, loan['mths_since_last_delinq'])
return loan
#=============
# Function 16
def cleaning_func_0(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['pct_paid'] = (loan.out_prncp / loan.loan_amnt)
return loan
#=============
# Function 17
def cleaning_func_1(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_mo'] = loan.issue_d.str[slice(0, 3, None)]
return loan
#=============
# Function 18
def cleaning_func_2(ld):
# core cleaning code
import pandas as pd
# ld = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=True)
pct_full = (ld.count() / len(ld))
names = list(pct_full[(pct_full > 0.75)].index)
loan = ld[names]
loan['issue_year'] = loan.issue_d.str[slice(4, None, None)]
return loan
#=============
# Function 19
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['bad_loan'] = 0
return data
#=============
# Function 20
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
bad_indicators = ['Charged Off ', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Default Receiver', 'Late (16-30 days)', 'Late (31-120 days)']
data.loc[(data.loan_status.isin(bad_indicators), 'bad_loan')] = 1
return data
#=============
# Function 21
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
return data
#=============
# Function 22
def cleaning_func_3(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['month'] = data['issue_dt'].dt.month
return data
#=============
# Function 23
def cleaning_func_4(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv')
data['issue_dt'] = pd.to_datetime(data.issue_d)
data['year'] = data['issue_dt'].dt.year
return data
#=============
# Function 24
def cleaning_func_0(loans):
# core cleaning code
import pandas as pd
date = ['issue_d', 'last_pymnt_d']
cols = ['issue_d', 'term', 'int_rate', 'loan_amnt', 'total_pymnt', 'last_pymnt_d', 'sub_grade', 'grade', 'loan_status']
# loans = pd.read_csv('../input/loan.csv', low_memory=False, parse_dates=date, usecols=cols, infer_datetime_format=True)
latest = loans['issue_d'].max()
finished_bool = (((loans['issue_d'] < (latest - pd.DateOffset(years=3))) & (loans['term'] == ' 36 months')) | ((loans['issue_d'] < (latest - pd.DateOffset(years=5))) & (loans['term'] == ' 60 months')))
finished_loans = loans.loc[finished_bool]
finished_loans['roi'] = (((finished_loans.total_pymnt / finished_loans.loan_amnt) - 1) * 100)
return finished_loans
#=============
# Function 25
def cleaning_func_0(df):
# core cleaning code
import pandas as pd
badLoan = ['Charged Off', 'Default', 'Late (31-120 days)', 'Late (16-30 days)', 'In Grace Period', 'Does not meet the credit policy. Status:Charged Off']
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
df['isBad'] = [(1 if (x in badLoan) else 0) for x in df.loan_status]
return df
#=============
# Function 26
def cleaning_func_4(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'Num_Loans']
return perStatedf
#=============
# Function 27
def cleaning_func_5(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
return df.groupby('addr_state', as_index=False).count()
#=============
# Function 28
def cleaning_func_6(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf.columns = ['State', 'loan_amt']
return perStatedf
#=============
# Function 29
def cleaning_func_8(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf.columns = ['State', 'badLoans']
return perStatedf
#=============
# Function 30
def cleaning_func_10(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
perStatedf.columns = ['State', 'totalLoans']
return perStatedf
#=============
# Function 31
def cleaning_func_14(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 32
def cleaning_func_15(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.Num_Loans / perStatedf.Pop)
return perStatedf
#=============
# Function 33
def cleaning_func_16(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 34
def cleaning_func_17(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_amnt', ascending=False)
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return statePopdf
#=============
# Function 35
def cleaning_func_18(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
return perStatedf
#=============
# Function 36
def cleaning_func_19(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='loan_amnt', ascending=False)
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.loan_amt / perStatedf.Pop)
return perStatedf
#=============
# Function 37
def cleaning_func_20(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
return df.groupby('addr_state', as_index=False).sum()
#=============
# Function 38
def cleaning_func_21(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_amnt', 'addr_state'])
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
return pd.DataFrame.from_dict(statePop, orient='index')
#=============
# Function 39
def cleaning_func_23(df):
# core cleaning code
import pandas as pd
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
return perStatedf
#=============
# Function 40
def cleaning_func_24(df):
# core cleaning code
import pandas as pd
statePop = {'CA': 39144818, 'TX': 27469144, 'FL': 20271878, 'NY': 19795791, 'IL': 12859995, 'PA': 12802503, 'OH': 11613423, 'GA': 10214860, 'NC': 10042802, 'MI': 9922576, 'NJ': 8958013, 'VA': 8382993, 'WA': 7170351, 'AZ': 6828065, 'MA': 6794422, 'IN': 6619680, 'TN': 6600299, 'MO': 6083672, 'MD': 6006401, 'WI': 5771337, 'MN': 5489594, 'CO': 5456574, 'SC': 4896146, 'AL': 4858979, 'LA': 4670724, 'KY': 4425092, 'OR': 4028977, 'OK': 3911338, 'CT': 3890886, 'IA': 3123899, 'UT': 2995919, 'MS': 2992333, 'AK': 2978204, 'KS': 2911641, 'NV': 2890845, 'NM': 2085109, 'NE': 1896190, 'WV': 1844128, 'ID': 1654930, 'HI': 1431603, 'NH': 1330608, 'ME': 1329328, 'RI': 1053298, 'MT': 1032949, 'DE': 945934, 'SD': 858469, 'ND': 756927, 'AK': 738432, 'DC': 672228, 'VT': 626042, 'WY': 586107}
statePopdf = pd.DataFrame.from_dict(statePop, orient='index').reset_index()
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf = pd.merge(perStatedf, statePopdf, on=['State'], how='inner')
perStatedf['PerCaptia'] = (perStatedf.badLoans / perStatedf.Pop)
return perStatedf
#=============
# Function 41
def cleaning_func_27(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
return perStatedf
#=============
# Function 42
def cleaning_func_28(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
badLoansdf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
perStatedf = pd.merge(perStatedf, badLoansdf, on=['State'], how='inner')
perStatedf['percentBadLoans'] = ((perStatedf.badLoans / perStatedf.totalLoans) * 100)
return perStatedf
#=============
# Function 43
def cleaning_func_29(df):
# core cleaning code
import pandas as pd
# df = pd.read_csv('../input/loan.csv', usecols=['loan_status', 'addr_state'])
perStatedf = df.groupby('addr_state', as_index=False).count().sort_values(by='loan_status', ascending=False)
badLoansdf = df.groupby('addr_state', as_index=False).sum().sort_values(by='isBad', ascending=False)
return badLoansdf
#=============
# Function 44
def cleaning_func_0(loan):
# core cleaning code
import pandas as pd
from collections import Counter
# loan = pd.read_csv('../input/loan.csv')
loan = loan[(loan.loan_status != 'Current')]
c = Counter(list(loan.loan_status))
mmp = {x[0]: 1 for x in c.most_common(20)}
loan['target'] = loan['loan_status'].map(mmp)
return loan
#=============
# Function 45
def cleaning_func_0(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.last_credit_pull_d = pd.to_datetime(data.last_credit_pull_d)
return data
#=============
# Function 46
def cleaning_func_1(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
return data
#=============
# Function 47
def cleaning_func_2(data):
# core cleaning code
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data.next_pymnt_d = | pd.to_datetime(data.next_pymnt_d) | pandas.to_datetime |
"""
Provides processing functions for CRSP data.
"""
from pilates import wrds_module
import pandas as pd
import numpy as np
import numba
from sklearn.linear_model import LinearRegression
class crsp(wrds_module):
def __init__(self, d):
wrds_module.__init__(self, d)
# Initialize values
self.col_id = 'permno'
self.col_date = 'date'
self.key = [self.col_id, self.col_date]
# For link with COMPUSTAT
self.linktype = ['LU', 'LC', 'LS']
# self.linkprim = ['P', 'C']
# Default data frequency
self.freq = 'M'
def set_frequency(self, frequency):
if frequency in ['Monthly', 'monthly', 'M', 'm']:
self.freq = 'M'
self.sf = self.msf
self.si = self.msi
elif frequency in ['Daily', 'daily', 'D', 'd']:
self.freq = 'D'
self.sf = self.dsf
self.si = self.dsi
else:
            raise Exception('CRSP data frequency should be either',
                            'Monthly or Daily')
def permno_from_gvkey(self, data):
""" Returns CRSP permno from COMPUSTAT gvkey.
        This code is inspired by the WRDS sample program 'merge_funda_crsp_byccm.sas'
available on the WRDS website.
Arguments:
data -- User provided data.
Required columns: [gvkey, datadate]
link_table -- WRDS provided linktable (ccmxpf_lnkhist)
            linktype -- Default: [LU, LC, LS] (see self.linktype)
            linkprim -- Duplicate matches are resolved by favoring linkprim
                        'P', then 'C', then 'J'
"""
# Columns required from data
key = ['gvkey', 'datadate']
# Columns required from the link table
cols_link = ['gvkey', 'lpermno', 'linktype', 'linkprim',
'linkdt', 'linkenddt']
# Open the user data
df = self.open_data(data, key).drop_duplicates().dropna()
        ## Create begin and end of fiscal year variables
#df['endfyr'] = df.datadate
#df['beginfyr'] = (df.datadate - np.timedelta64(11, 'M')).astype('datetime64[M]')
# Open the link data
link = self.open_data(self.linktable, cols_link)
link = link.dropna(subset=['gvkey', 'lpermno', 'linktype', 'linkprim'])
# Retrieve the specified links
link = link[(link.linktype.isin(self.linktype))]
#link = link[['gvkey', 'lpermno', 'linkdt', 'linkenddt']]
# Merge the data
dm = df.merge(link, how='left', on='gvkey')
# Filter the dates (keep correct matches)
## Note: Use conditions from WRDS code.
cond1 = (dm.linkdt <= dm.datadate) | (pd.isna(dm.linkdt))
cond2 = (dm.datadate <= dm.linkenddt) | (pd.isna(dm.linkenddt))
dm = dm[cond1 & cond2]
# Deal with duplicates
dups = dm[key].duplicated(keep=False)
dmf = dm[~dups] # Final links list
dmd = dm[dups].set_index(['gvkey', 'datadate'])
## Favor linkprim, in order: 'P', 'C', 'J' and 'N'
for lp in ['P', 'C', 'J']:
dups_lp = dmd[dmd.linkprim==lp]
dmd = dmd[~dmd.index.isin(dups_lp.index)]
dups_lp = dups_lp.reset_index()
dmf = pd.concat([dmf, dups_lp])
# Rename lpermno to permno and remove unnecessary columns
dmf = dmf.rename(columns={'lpermno': 'permno'})
dmf = dmf[['gvkey', 'datadate', 'permno']]
# Check for duplicates on the key
n_dup = dmf.shape[0] - dmf[key].drop_duplicates().shape[0]
if n_dup > 0:
print("Warning: The merged permno",
"contains {:} duplicates".format(n_dup))
# Add the permno to the user's data
dfu = self.open_data(data, key).dropna()
dfin = dfu.merge(dmf, how='left', on=key)
dfin.index = dfu.index
return(dfin.permno)
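    # Usage sketch (hypothetical names): given an initialized crsp instance `db`
    # and a Compustat-style frame `funda` with 'gvkey' and 'datadate' columns,
    # the CRSP identifier can be attached as:
    #     funda['permno'] = db.permno_from_gvkey(funda)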
def permno_from_cusip(self, data):
""" Returns CRSP permno from CUSIP.
        Note: this function does not ensure a 1-to-1 mapping; several cusips
        may map to the same permno.
Args:
data -- User provided data.
Required columns: ['cusip']
The cusip needs to be the CRSP ncusip.
"""
dfu = self.open_data(data, ['cusip'])
cs = dfu.drop_duplicates()
pc = self.open_data(self.msenames, ['ncusip', 'permno'])
pc = pc.drop_duplicates()
# Merge the permno
csf = cs.merge(pc, how='left', left_on=['cusip'], right_on=['ncusip'])
csf = csf[['cusip', 'permno']].dropna().drop_duplicates()
dfin = dfu.merge(csf, how='left', on='cusip')
dfin.index = dfu.index
return(dfin.permno)
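    # Usage sketch (hypothetical names): `trades` must carry the historical CRSP
    # ncusip in a column named 'cusip':
    #     trades['permno'] = db.permno_from_cusip(trades)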
def _adjust_shares(self, data, col_shares):
""" Adjust the number of shares using CRSP cfacshr field.
Arguments:
data -- User provided data.
                    Required fields: [permno, 'col_shares', self.d.col_date]
            col_shares -- The field with the number of shares from data.
        The cfacshr adjustment factor is matched on the (year, month) of the
        user's date column (self.d.col_date).
"""
# Open and prepare the user data
cols = ['permno', col_shares, self.d.col_date]
dfu = self.open_data(data, cols)
index = dfu.index
dt = pd.to_datetime(dfu[self.d.col_date]).dt
dfu['year'] = dt.year
dfu['month'] = dt.month
# Open and prepare the CRSP data
cols = ['permno', 'date', 'cfacshr']
df = self.open_data(self.msf, cols)
dt = pd.to_datetime(df.date).dt
df['year'] = dt.year
df['month'] = dt.month
# Merge the data
key = ['permno', 'year', 'month']
dfu = dfu[key+[col_shares]].merge(df[key+['cfacshr']],
how='left', on=key)
dfu.loc[dfu.cfacshr.isna(), 'cfacshr'] = 1
# Compute the adjusted shares
dfu['adj_shares'] = dfu[col_shares] * dfu.cfacshr
dfu.index = index
return(dfu.adj_shares.astype('float32'))
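    # Usage sketch (hypothetical names): `holdings` has 'permno', the user date
    # column (self.d.col_date) and a raw share count in 'shares'; the returned
    # series is split-adjusted and aligned on holdings' index:
    #     holdings['adj_shares'] = db._adjust_shares(holdings, 'shares')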
def _get_fields(self, fields, data=None, file=None):
""" Returns the fields from CRSP.
This function is only used internally for the CRSP module.
Arguments:
            fields -- Fields from the CRSP stock file
            data -- User provided data
                    Required columns: [permno, date]
                    If none given, returns the entire CRSP file with key.
                    Otherwise, return only the fields with the data index.
            file -- File to use. Defaults to the stock file (self.sf)
"""
# Get the fields
# Note: CRSP data is clean without duplicates
if not file:
file = self.sf
key = [self.col_id, self.col_date]
if file == self.si:
key = [self.col_date]
df = self.open_data(file, key+fields)
# Construct the object to return
if data is not None:
# Merge and return the fields
data_key = self.open_data(data, key)
index = data_key.index
dfin = data_key.merge(df, how='left', on=key)
dfin.index = index
return(dfin[fields])
else:
# Return the entire dataset with keys
return(df)
def get_fields_daily(self, fields, data):
""" Returns the fields from CRSP daily.
Arguments:
            fields -- Fields from the CRSP daily stock file (dsf)
            data -- User provided data
                    Required columns: [permno, self.d.col_date]
                    The fields are returned aligned on the data index.
Requires:
self.d.col_date -- Date field to use for the user data
"""
keyu = ['permno', self.d.col_date]
dfu = self.open_data(data, keyu)
dfu.loc[:, self.col_date] = dfu[self.d.col_date]
dfu[fields] = self._get_fields(fields, dfu, self.dsf)
return(dfu[fields])
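    # Usage sketch (hypothetical names): attach daily price and volume from the
    # CRSP daily stock file to an event-level frame `events` keyed by
    # ['permno', self.d.col_date]:
    #     events[['prc', 'vol']] = db.get_fields_daily(['prc', 'vol'], events)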
# def _get_window_sort(self, nperiods, caldays, min_periods):
# # Define the window and how the data should be sorted
# if caldays is None:
# window = abs(nperiods)
# ascending = (nperiods < 0)
# else:
# window = str(abs(caldays)) + "D"
# ascending = (caldays < 0)
# if min_periods is None:
# print("Warning: It is advised to provide a minimum number of observations "
    #               "to compute aggregate values when using the 'caldays' argument. "
    #               "Not doing so will result in small rolling windows.")
# return window, ascending
def _value_for_data(self, var, data, ascending, useall):
"""" Add values to the users data and return the values.
Arguments:
df -- Internal data containing the values
Columns: [permno, date]
The data is indexed by date and grouped by permno.
data -- User data
Columns: [permno, wrds.col_date]
nperiods -- Number of periods to compute the variable
useall -- If True, use the compounded return of the last
available trading date (if nperiods<0) or the
compounded return of the next available trading day
(if nperiods>0).
"""
key = self.key
var.name = 'var'
values = var.reset_index()
#if nperiods == 0:
# values = var.reset_index()
#else:
        #    # Check that the shift only occurs within permnos
        #    import ipdb; ipdb.set_trace()
# values = var.shift(-nperiods).reset_index()
# Make sure the types are correct
values = self._correct_columns_types(values)
# Open user data
cols_data = [self.col_id, self.d.col_date]
dfu = self.open_data(data, cols_data)
# Prepare the dataframes for merging
dfu = dfu.sort_values(self.d.col_date)
dfu = dfu.dropna()
values = values.sort_values(self.col_date)
if useall:
# Merge on permno and on closest date
# Use the last or next trading day if requested
# Shift a maximum of 6 days
if ascending:
direction = 'backward'
else:
direction = 'forward'
dfin = pd.merge_asof(dfu, values,
left_on=self.d.col_date,
right_on=self.col_date,
by=self.col_id,
tolerance=pd.Timedelta('6 day'),
direction=direction)
else:
dfin = dfu.merge(values, how='left', left_on=cols_data, right_on=self.key)
dfin.index = dfu.index
return(dfin['var'].astype('float32'))
def _value_for_data_index(self, var, data, ascending, useall):
"""" Add indexes values to the users data and return the values.
Arguments:
df -- Internal data containing the values
Columns: [permno, date]
The data is indexed by date and grouped by permno.
data -- User data
Columns: [permno, wrds.col_date]
nperiods -- Number of periods to compute the variable
useall -- If True, use the compounded return of the last
available trading date (if nperiods<0) or the
compounded return of the next available trading day
(if nperiods>0).
"""
var.name = 'var'
values = var.reset_index()
values = self._correct_columns_types(values)
# Open user data
cols_data = [self.d.col_date]
dfu = self.open_data(data, cols_data)
# Prepare the dataframes for merging
dfu = dfu.sort_values(self.d.col_date)
dfu = dfu.dropna()
values = values.sort_values(self.col_date)
if useall:
            # Merge on the closest date
# Use the last or next trading day if requested
# Shift a maximum of 6 days
if ascending:
direction = 'backward'
else:
direction = 'forward'
dfin = pd.merge_asof(dfu, values,
left_on=self.d.col_date,
right_on=self.col_date,
tolerance= | pd.Timedelta('6 day') | pandas.Timedelta |
from __future__ import division
import pandas as pd
import os.path
import sys
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
from .earthworm_functions import EarthwormFunctions
class EarthwormInputs(ModelSharedInputs):
"""
Input class for Earthworm.
"""
def __init__(self):
"""Class representing the inputs for Earthworm"""
super(EarthwormInputs, self).__init__()
self.k_ow = | pd.Series([], dtype="float") | pandas.Series |
# -*- coding: utf-8 -*-
# test/unit/stat/test_period.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test Period class"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
from asyncio import Lock
from threading import Lock as TLock
# Third-party imports
import pandas as pd
import pytest
# Local imports
from loadlimit.stat import Period
from loadlimit.util import aiter
# ============================================================================
# Test total()
# ============================================================================
def test_total():
"""Returns total number of datapoints in the data structure"""
p = Period()
for i in range(5):
p[i]['timedata'].extend(range(5))
expected = 25
assert p.total() == expected
assert p.numdata == expected
@pytest.mark.asyncio
async def test_atotal():
"""Async version of total()"""
p = Period()
async for i in aiter(range(5)):
p[i]['timedata'].extend(range(5))
expected = 25
result = await p.atotal()
assert result == expected
assert p.numdata == expected
# ============================================================================
# Test clearvals
# ============================================================================
def test_clearvals_all():
"""Clearvals empties every list in the container"""
p = Period()
for i in range(5):
p[i]['timedata'].extend(range(5))
p.clearvals()
assert p.numdata == 0
for v in p.values():
assert len(v['timedata']) == 0
def test_clearvals_key():
"""Clearvals empties only the list for the specific key"""
p = Period()
for i in range(5):
p[i]['timedata'].extend(range(5))
p.clearvals(4)
assert p.numdata == 20
for i, v in p.items():
if i == 4:
assert len(v['timedata']) == 0
else:
assert len(v['timedata']) == 5
# ============================================================================
# Test aclearvals()
# ============================================================================
@pytest.mark.asyncio
async def test_aclearvals_all():
"""Clearvals empties every list in the container"""
p = Period()
async for i in aiter(range(5)):
p[i]['timedata'].extend(range(5))
await p.aclearvals()
assert p.numdata == 0
async for v in aiter(p.values()):
assert len(v['timedata']) == 0
@pytest.mark.asyncio
async def test_aclearvals_key():
"""Clearvals empties only the list for the specific key"""
p = Period()
async for i in aiter(range(5)):
p[i]['timedata'].extend(range(5))
await p.aclearvals(4)
assert p.numdata == 20
async for i, v in aiter(p.items()):
if i == 4:
assert len(v['timedata']) == 0
else:
assert len(v['timedata']) == 5
# ============================================================================
# Test period lock
# ============================================================================
def test_period_lockarg():
"""Use custom Lock instance with Period"""
mylock = Lock()
p = Period(lock=mylock)
assert p.lock is mylock
def test_period_defaultlock():
"""Create new Lock object if lock not specified"""
p = Period()
assert p.lock
assert isinstance(p.lock, Lock)
assert not p.lock.locked()
@pytest.mark.parametrize('obj', [42, 4.2, '42', [42], (4.2, ), TLock])
def test_period_lockarg_notlock(obj):
"""Non- asyncio.Lock objects raises an error"""
expected = ('lock expected asyncio.Lock, got {} instead'.
format(type(obj).__name__))
with pytest.raises(TypeError) as err:
Period(lock=obj)
assert err.value.args == (expected, )
# ============================================================================
# Test addtimedata
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42]])
def test_addtimedata_not_series(val):
"""Raise error if the data arg is not a pandas.Series object"""
stat = Period()
expected = ('data expected pandas.Series, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
stat.addtimedata(42, val)
assert err.value.args == (expected, )
# ============================================================================
# Test adderror
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42]])
def test_adderror_not_series(val):
"""Raise error if the data arg is not a pandas.Series object"""
stat = Period()
expected = ('data expected pandas.Series, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
stat.adderror(42, val)
assert err.value.args == (expected, )
def test_adderror_series():
"""Add a series to the dict"""
stat = Period()
error = Exception('i am an error')
s = pd.Series([1, 1, 0, repr(error)])
stat.adderror('42', s)
errors = list(stat.error('42'))
assert len(errors) == 1
assert errors[0] is s
# ============================================================================
# Test addfailure
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42]])
def test_addfailure_not_series(val):
"""Raise error if the data arg is not a pandas.Series object"""
stat = Period()
expected = ('data expected pandas.Series, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
stat.addfailure(42, val)
assert err.value.args == (expected, )
def test_addfailure_series():
"""Add a series to the dict"""
stat = Period()
error = 'i am a failure'
s = pd.Series([1, 1, 0, error])
stat.addfailure('42', s)
failures = list(stat.failure('42'))
assert len(failures) == 1
assert failures[0] is s
# ============================================================================
# Test numtimedata
# ============================================================================
@pytest.mark.parametrize('maxnum', list(range(1, 6)))
def test_numtimedata(maxnum):
"""Return number of time data stored"""
key = 'hello'
stat = Period()
for i in range(maxnum):
s = | pd.Series([1, 1, i]) | pandas.Series |
from unittest import TestCase
from unittest.mock import Mock
import numpy as np
import pandas as pd
from rdt.transformers import BinaryEncoder
from rdt.transformers.null import NullTransformer
class TestBinaryEncoder(TestCase):
def test___init__(self):
"""Test default instance"""
# Run
transformer = BinaryEncoder()
# Asserts
error_message = 'Unexpected missing_value_replacement'
assert transformer.missing_value_replacement is None, error_message
assert not transformer.model_missing_values, 'model_missing_values is False by default'
def test_get_output_sdtypes_model_missing_values_column_created(self):
"""Test the ``get_output_sdtypes`` method when a null column is created.
When a null column is created, this method should apply the ``_add_prefix``
method to the following dictionary of output sdtypes:
output_sdtypes = {
'value': 'float',
'is_null': 'float'
}
Setup:
- initialize a ``BinaryEncoder`` transformer which:
- sets ``self.null_transformer`` to a ``NullTransformer`` where
``self._model_missing_values`` is True.
- sets ``self.column_prefix`` to a string.
Output:
- the ``output_sdtypes`` dictionary, but with ``self.column_prefix``
added to the beginning of the keys.
"""
# Setup
transformer = BinaryEncoder()
transformer.null_transformer = NullTransformer(missing_value_replacement='fill')
transformer.null_transformer._model_missing_values = True
transformer.column_prefix = 'abc'
# Run
output = transformer.get_output_sdtypes()
# Assert
expected = {
'abc.value': 'float',
'abc.is_null': 'float'
}
assert output == expected
def test__fit_missing_value_replacement_ignore(self):
"""Test _fit missing_value_replacement equal to ignore"""
# Setup
data = pd.Series([False, True, True, False, True])
# Run
transformer = BinaryEncoder(missing_value_replacement=None)
transformer._fit(data)
# Asserts
error_msg = 'Unexpected fill value'
assert transformer.null_transformer._missing_value_replacement is None, error_msg
def test__fit_missing_value_replacement_not_ignore(self):
"""Test _fit missing_value_replacement not equal to ignore"""
# Setup
data = pd.Series([False, True, True, False, True])
# Run
transformer = BinaryEncoder(missing_value_replacement=0)
transformer._fit(data)
# Asserts
error_msg = 'Unexpected fill value'
assert transformer.null_transformer._missing_value_replacement == 0, error_msg
def test__fit_array(self):
"""Test _fit with numpy.array"""
# Setup
data = pd.Series([False, True, True, False, True])
# Run
transformer = BinaryEncoder(missing_value_replacement=0)
transformer._fit(data)
# Asserts
error_msg = 'Unexpected fill value'
assert transformer.null_transformer._missing_value_replacement == 0, error_msg
def test__transform_series(self):
"""Test transform pandas.Series"""
# Setup
data = pd.Series([False, True, None, True, False])
# Run
transformer = Mock()
BinaryEncoder._transform(transformer, data)
# Asserts
expect_call_count = 1
expect_call_args = pd.Series([0., 1., None, 1., 0.], dtype=float)
error_msg = 'NullTransformer.transform must be called one time'
assert transformer.null_transformer.transform.call_count == expect_call_count, error_msg
pd.testing.assert_series_equal(
transformer.null_transformer.transform.call_args[0][0],
expect_call_args
)
def test__transform_array(self):
"""Test transform numpy.array"""
# Setup
data = pd.Series([False, True, None, True, False])
# Run
transformer = Mock()
BinaryEncoder._transform(transformer, data)
# Asserts
expect_call_count = 1
expect_call_args = pd.Series([0., 1., None, 1., 0.], dtype=float)
error_msg = 'NullTransformer.transform must be called one time'
assert transformer.null_transformer.transform.call_count == expect_call_count, error_msg
pd.testing.assert_series_equal(
transformer.null_transformer.transform.call_args[0][0],
expect_call_args
)
def test__reverse_transform_missing_value_replacement_ignore(self):
"""Test _reverse_transform with missing_value_replacement equal to ignore"""
# Setup
data = | pd.Series([0.0, 1.0, 0.0, 1.0, 0.0]) | pandas.Series |
"""Adapted from https://github.com/mdeff/fma/blob/master/utils.py"""
import os
import ast
import pandas as pd
def load(filepath):
filename = os.path.basename(filepath)
if 'features' in filename:
return | pd.read_csv(filepath, index_col=0, header=[0, 1, 2]) | pandas.read_csv |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
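# Hedged illustration (not part of the original test suite): test_comp_nat above
# hinges on NaT comparison semantics -- ==, <, > against pd.NaT are elementwise
# False, while != is elementwise True. A minimal standalone check with toy data:
import pandas as pd

left_demo = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')])
assert not (pd.NaT == pd.NaT)          # NaT never compares equal, even to itself
assert pd.NaT != pd.NaT                # inequality against NaT is True
assert (left_demo == pd.NaT).tolist() == [False, False, False]
assert (left_demo != pd.NaT).tolist() == [True, True, True]
assert (left_demo < pd.NaT).tolist() == [False, False, False]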
from CHECLabPy.core.io import HDF5Reader, HDF5Writer
from sstcam_sandbox import get_data
from os.path import dirname, abspath
import numpy as np
import pandas as pd
from IPython import embed
DIR = abspath(dirname(__file__))
def process(path, output):
with HDF5Reader(path) as reader:
df = reader.read("data")
d_list = []
for extractor, group in df.groupby("extractor"):
params = dict(extractor=extractor)
for key, group_key in group.groupby("key"):
charge = group_key['charge'].values
params[f'mean_{key}'] = np.mean(charge)
params[f'std_{key}'] = np.std(charge)
d_list.append(params)
df_output = | pd.DataFrame(d_list) | pandas.DataFrame |
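# Hedged sketch with made-up toy data (not the original HDF5 file): the nested
# groupby loop in process() above yields one row per extractor with mean_<key>
# and std_<key> columns. The same layout can be produced with a pivoted
# groupby aggregation:
import pandas as pd

toy = pd.DataFrame({
    "extractor": ["cc"] * 4 + ["peak"] * 4,
    "key": ["on", "on", "off", "off"] * 2,
    "charge": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
})
agg = toy.groupby(["extractor", "key"])["charge"].agg(["mean", "std"]).unstack("key")
agg.columns = [f"{stat}_{key}" for stat, key in agg.columns]
print(agg.reset_index())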
import pandas as pd
import numpy as np
import pytest
from column_completer import ColumnCompleter
X = np.random.randint(0, 100, (8, 3))
def test_name_collision_value_error_1():
df = pd.DataFrame(X, columns=["Col A", "Col_A", "Col B"])
with pytest.raises(ValueError) as err:
q = ColumnCompleter(df)
assert "spaces causes a collision of column names" in str(err.value)
def test_attribute_space_replaced_1():
df = pd.DataFrame(X, columns=["Col A", "col B", "Col C"])
q = ColumnCompleter(df)
assert all([col.startswith('Col_')
for col in vars(q) if col.startswith('Col')])
def test_attribute_space_replaced_2():
df = | pd.DataFrame(X, columns=["Col A", "col B", "Col C"]) | pandas.DataFrame |
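# Hedged usage sketch based on the behaviour exercised by the tests above; it
# assumes an attribute on ColumnCompleter resolves back to the original column
# label, so columns whose names contain spaces become tab-completable:
import numpy as np
import pandas as pd
from column_completer import ColumnCompleter

df_demo = pd.DataFrame(np.random.randint(0, 100, (4, 2)), columns=["Col A", "Col B"])
q_demo = ColumnCompleter(df_demo)
print(q_demo.Col_A)            # expected to give back the original "Col A" label
print(df_demo[q_demo.Col_A])   # and thus index the DataFrame by that column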
from glob import glob
import os
import json
import pandas as pd
import pickle
import numpy as np
def load_results(data_folder):
"""Load experiment results as pickled RepeatedTaskResult object"""
result_fns = glob(os.path.join(data_folder, "result_task_*.pkl"))
all_results = []
for result_fn in result_fns:
with open(result_fn, "rb") as f:
result = pickle.load(f)
if len(result) == 1:
all_results += result
else:
all_results.append(result)
return all_results
def parse_results(tasks, mode):
"""Convert list of RepeatedTaskResult objects to pandas dataframe"""
dfs = []
for i_task, task_data in enumerate(tasks):
dfs_per_task = []
for i, response_data in enumerate(task_data.responses):
response_df = pd.DataFrame(response_data["main_data"])
response_df["response_index"] = i
response_df["worker_id"] = task_data[1][i][
"worker_id"
] # reading out from raw_responses
response_df.loc[response_df["is_demo"] == True, "trial_index"] = np.arange(
-len(response_df[response_df["is_demo"] == True]), 0
)
response_df.loc[response_df["is_demo"] == False, "trial_index"] = np.arange(
len(response_df[response_df["is_demo"] == False])
)
dfs_per_task.append(response_df)
task_df = pd.concat(dfs_per_task, 0)
task_df["task_number"] = int(task_data.task_id.split("-")[-1])
task_df["task_id"] = task_data.task_id
dfs.append(task_df)
if len(dfs) > 0:
df = pd.concat(dfs, 0)
df["mode"] = mode
df = df.reset_index().drop("index", axis=1)
df["choice_number"] = df["choice"].map(lambda x: -1 if x == "a" else 1)
return df
return None
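# Hedged convenience sketch (not in the original module): chain load_results()
# and parse_results() for a single experiment folder; the folder path and mode
# label are whatever the caller already uses elsewhere.
def load_and_parse(data_folder, mode):
    """Load pickled RepeatedTaskResult files and return the parsed dataframe
    (or None if the folder held no results)."""
    tasks = load_results(data_folder)
    return parse_results(tasks, mode)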
def parse_feedback(tasks, mode):
dfs = []
for i_task, task_data in enumerate(tasks):
for i, response_data in enumerate(task_data.responses):
surveys = [
it
for it in response_data["raw_data"]
if it["trial_type"] == "survey-text"
]
feedback = "\n".join(
[json.loads(it["responses"])["feedback"] for it in surveys]
)
dfs.append(
pd.DataFrame(
{
"response_index": i,
"task_number": int(task_data.task_id.split("-")[-1]),
"task_id": task_data.task_id,
"feedback": feedback,
"mode": mode,
},
index=[len(dfs)],
)
)
if len(dfs) > 0:
df = | pd.concat(dfs, 0) | pandas.concat |
import calendar
import numpy as np
import pandas as pd
emergency_manager_csv = pd.read_csv('clb_error_manager_list.csv', encoding='utf-8')
df = | pd.DataFrame(emergency_manager_csv) | pandas.DataFrame |
#! /usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from os import listdir
from os.path import isfile, join
import os
import sys
import time
import pandas as pd
import numpy as np
import re
import hashlib
import logging
import joblib
import gzip
from scipy import stats
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import pkg_resources
from mmbot.decoder import return_decoded_value
if sys.version_info >= (3, 0):
from oletools.olevba3 import VBA_Parser
else:
from oletools.olevba import VBA_Parser
class MaliciousMacroBot:
def __init__(self, benign_path=None, malicious_path=None,
model_path=pkg_resources.resource_filename('mmbot', 'model'), retain_sample_contents=False):
"""
Constructor to setup path variables for model and sample data and initialize object.
:param benign_path: directory path (relative or absolute) to benign documents for the machine learning model to learn from.
:param malicious_path: directory path (relative or absolute) to malicious documents for the machine learning model to learn from.
:param model_path: directory where modeldata.pickle and vocab.txt files are kept.
:param retain_sample_contents: this relates to level of detail saved in the model data. If True, potentially sensitive
information like extracted vba will be stored in the model's pickle file. The benefit is that incremental
models can be built, where adding a new file to the training set will result in only reprocessing that one new
file. Otherwise all files in the benign_path and malicious_path will be reprocessed each time the model is
rebuilt. If you are experimenting with building many models and comparing results, set this to True,
otherwise keep it to False.
"""
# os.path.join(os.path.dirname(__file__), 'model')
self.clear_state()
self.set_model_paths(benign_path, malicious_path, model_path)
self.retain_sample_contents = retain_sample_contents
def clear_state(self):
"""
Resets object's state to clear out all model internals created after loading state from disk
"""
self.cls = None
self.modeldata = None
self.features = {}
def set_model_paths(self, benign_path, malicious_path, model_path):
"""
Helper function to set up paths to files and pre-emptively identify issues with the existence of files and
paths that will be an issue later.
:param benign_path: directory path (relative or absolute) to benign documents for the machine learning model to learn from.
:param malicious_path: directory path (relative or absolute) to malicious documents for the machine learning model to learn from.
:param model_path: directory where model files and helpful data will be saved for the algorithm to function.
"""
try:
# One of the two paths is None
if (benign_path is None and malicious_path is not None) or (
benign_path is not None and malicious_path is None):
raise IOError("""ERROR: When supplying benign_path and malicious_path, both paths must have samples to
build a classification model. Either values can be None and an existing saved model
can be supplied, or paths can exist with corresponding office files and a new model
can be built.""")
# All three paths are None
if benign_path is None and malicious_path is None and model_path is None:
raise IOError(
"ERROR: All paths supplied for benign_path, malicious_path, and model_path cannot be None")
# Make sure provided paths actually do exist
if benign_path and malicious_path:
self.malicious_path = os.path.join(malicious_path, '')
if not os.path.exists(malicious_path) or not os.path.isdir(malicious_path):
raise IOError("ERROR: The malicious_path provided {} does not exist".format(malicious_path))
self.benign_path = os.path.join(benign_path, '')
if not os.path.exists(benign_path) or not os.path.isdir(benign_path):
raise IOError("ERROR: The benign_path provided {} does not exist".format(benign_path))
if model_path is not None:
self.model_path = os.path.join(model_path, '')
self.vba_vocab = os.path.join(self.model_path, 'vocab.txt')
self.modeldata_pickle = os.path.join(self.model_path, 'modeldata.pickle')
self.modeldata_pickle_gz = os.path.join(self.model_path, 'modeldata.pickle.gz')
# If the user-supplied path does not exist, use the default vocab.txt that comes with the package
if not os.path.exists(self.vba_vocab):
self.vba_vocab = os.path.join(pkg_resources.resource_filename('mmbot', 'model'), 'vocab.txt')
except Exception as e:
self.malicious_path = './tests/samples/malicious/'
raise IOError("ERROR: Supplied benign_path, malicious_path, or model_path does not "
"exist or is not a directory. {}".format(str(e)))
def get_file_hash(self, pathtofile):
"""
Computes the MD5 hash of the file
:param pathtofile: absolute or relative path to a file
:return: md5 hash of file as a string
"""
if os.path.isfile(pathtofile):
with open(pathtofile, 'rb') as file_to_hash:
filedata = file_to_hash.read()
md5 = hashlib.md5(filedata).hexdigest()
# sha1 = hashlib.sha1(filedata).hexdigest()
# sha256 = hashlib.sha256(filedata).hexdigest()
return md5
return None
def fill_missing_hashes(self, row):
"""
Checks if there is a null or NaN value for the 'md5' column. If so, computes it, if not,
returns original value. Used to fill in missing md5's in a dataframe.
:param row: a row of a dataframe with a column named 'md5' and 'filepath'
:return: for any missing md5 values, computes the hash on the given filepath
"""
if pd.isnull(row['md5']):
return self.get_file_hash(row['filepath'])
else:
return row['md5']
def get_file_meta_data(self, filepath, filename=None, getHash=False):
"""
helper function to get meta information about a file, including its path, date modified, and size
:param filepath: path to a file
:param filename: filename
:param getHash: whether or not the hash should be computed
:return: a tuple of format (filename, filepath, filesize, filemodified, md5)
"""
if filename is None:
filename = os.path.split(filepath)[1]
filemodified = time.ctime(os.path.getmtime(filepath))
filesize = os.path.getsize(filepath)
md5 = np.nan
if getHash:
md5 = self.get_file_hash(filepath)
return (filename, filepath, filesize, filemodified, md5)
def get_samples_from_disk(self, path=None, getHash=False):
"""
Given a path to a file or folder of files, recursively lists all files and metadata for the files
:param path: directory path
:param getHash: boolean, indicating whether or not to compute hash
:return: a dataframe with the filename, filepath, filesize, modified date, and md5 hash for each file found
"""
if not os.path.exists(path):
raise IOError("ERROR: File or path does not exist: {}".format(path, ))
if os.path.isfile(path):
meta = self.get_file_meta_data(path, getHash=getHash)
return pd.DataFrame({'filename': (meta[0],),
'filepath': (meta[1],),
'filesize': (meta[2],),
'filemodified': (meta[3],),
'md5': (meta[4],)})
try:
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(root, filename)
meta = self.get_file_meta_data(filepath, filename, getHash=getHash)
matches.append(meta)
if len(matches) > 0:
filenames, paths, sizes, dates, md5s = zip(*matches)
return pd.DataFrame({'filename': filenames, 'filepath': paths, 'filesize': sizes, \
'filemodified': dates, 'md5': md5s})
return pd.DataFrame()
except Exception as e:
raise IOError("ERROR with file or path {}: {}".format(path, str(e)))
def get_family_name(self, mypath):
"""
Given a file path, return the deepest directory name to allow organizing samples by name and having that meta
data in predictions
:param mypath: path to a file in the model training set
:return: deepest directory name, or 'Unknown' if there is a problem with a part of the file path
"""
normalized_path = os.path.dirname(os.path.abspath(mypath))
m = re.match(r'.*[\\/](.*?$)', normalized_path)
try:
group = m.group(1)
if len(group) > 0:
return group
return 'Unknown'
except:
return 'Unknown'
def new_samples(self, existing, possiblenew):
"""
Returns dataframe containing rows from possiblenew with MD5 hashes that are not in existing, to identify
new file samples.
:param existing: dataframe containing an 'md5' field
:param possiblenew: dataframe containing an 'md5' field
:return: Returns dataframe containing rows from possiblenew with MD5 hashes that are not in existing.
"""
existing_items = existing['md5'].tolist()
possiblenew_items = possiblenew['md5'].tolist()
actualnew_items = [x for x in possiblenew_items if x not in existing_items]
if len(actualnew_items) > 0:
return possiblenew[possiblenew['md5'].isin(actualnew_items)].copy()
return None
def get_language_features(self):
"""
After vba has been extracted from all files, this function does feature extraction on that vba and prepares
everything for a model to be built. load_model_data has been called, populating self.modeldata
:return: feature matrix and labels in a dictionary structure with keys 'X' and 'y' respectively
"""
self.load_model_vocab()
# Get custom VBA features
self.modeldata = pd.concat([self.modeldata, self.modeldata.extracted_vba.apply(self.get_vba_features)], axis=1)
tempfeatures = self.modeldata.columns
self.features['vba_features'] = [x for x in tempfeatures if x.startswith('vba_')]
# Count Vectorizer
vocab_lower = [x.lower() for x in self.features['vocab']]
vocab_lower = list(set(vocab_lower))
self.model_cntvect = CountVectorizer(vocabulary=vocab_lower,
lowercase=True,
decode_error='ignore',
token_pattern=r"(?u)\b\w[\w\.]+\b")
self.modeldata_cnts = self.model_cntvect.fit_transform(self.modeldata['extracted_vba'])
self.features['cnt_features'] = ['cnt_' + x for x in self.model_cntvect.get_feature_names()]
self.features['features'] = self.model_cntvect.get_feature_names()
self.modeldata = self.modeldata.join(pd.DataFrame(self.modeldata_cnts.toarray(),
columns=self.features['cnt_features']))
# TF-IDF Transformer
self.model_tfidf_trans = TfidfTransformer()
self.model_tfidf_cntvect = self.model_tfidf_trans.fit_transform(self.modeldata_cnts.toarray())
self.features['tfidf_features'] = ['tfidf_' + x for x in self.features['features']]
self.modeldata = self.modeldata.join(pd.DataFrame(self.model_tfidf_cntvect.toarray(),
columns=self.features['tfidf_features']))
# Train and Test Model
predictive_features = self.features['tfidf_features'] + self.features['vba_features']
self.features['predictive_features'] = predictive_features
self.clf_X = self.modeldata[predictive_features].values
self.clf_y = np.array(self.modeldata['label'])
return {'X': self.clf_X, 'y': self.clf_y}
def clear_model_features(self):
"""
Removes all columns from modeldata with names starting with cnt_, tfidf_, or vba_
These are the computed columns for the model
"""
if self.modeldata is not None:
columns = self.modeldata.columns
cntcolumns = [x for x in columns if x.startswith('cnt_')]
vba_feature_columns = [x for x in columns if x.startswith('vba_')]
tfidfcolumns = [x for x in columns if x.startswith('tfidf_')]
self.modeldata.drop(self.modeldata[cntcolumns], axis=1, inplace=True)
self.modeldata.drop(self.modeldata[vba_feature_columns], axis=1, inplace=True)
self.modeldata.drop(self.modeldata[tfidfcolumns], axis=1, inplace=True)
def build_models(self):
"""
After get_language_features is called, this function builds the models based on
the classifier matrix and labels.
:return:
"""
self.cls = RandomForestClassifier(n_estimators=100, max_features=.2)
# build classifier
self.cls.fit(self.clf_X, self.clf_y)
return self.cls
def load_model_vocab(self):
"""
Loads vocabulary used in the bag of words model
:return: fixed vocabulary that was loaded into internal state
"""
with open(self.vba_vocab) as vocabfile:
lines = vocabfile.readlines()
lines = [x.strip() for x in lines]
self.features['vocab'] = set(lines)
return self.features['vocab']
def load_model_data(self, exclude=None):
"""
Merges previously saved model data (if exists) with new files found in malicious and benign doc paths.
:param exclude: string value - if samples (including path) from the training set contain this string,
they will be omitted from the model. This is primarily used to hold malware families from consideration
in the model to assess classification generalization to new unknown families.
:return: number of new documents loaded into the model
"""
newdoc_cnt = 0
knowndocs = None
# Clear all stored contents because we don't save enough detail to pick up where we left off last time
if self.modeldata is not None:
knowndocs = self.modeldata.copy(deep=True)
try:
if self.malicious_path:
maldocs = self.get_samples_from_disk(self.malicious_path)
except:
self.malicious_path = './tests/samples/malicious/'
self.benign_path = './tests/samples/benign/'
self.model_path = './tests/samples/model/'
maldocs = self.get_samples_from_disk(self.malicious_path)
if len(maldocs) > 0:
maldocs['label'] = 'malicious'
benigndocs = self.get_samples_from_disk(self.benign_path)
if len(benigndocs) > 0:
benigndocs['label'] = 'benign'
if len(benigndocs) == 0 and len(maldocs) == 0 and knowndocs is None:
raise IOError("ERROR: Unable to load saved model data {} or process samples rooted in model path {}. "
"Unable to make predictions.".format(self.modeldata_pickle, self.model_path))
possiblenew = | pd.concat([maldocs, benigndocs], axis=0) | pandas.concat |
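# Hedged, self-contained sketch (toy strings, not real extracted VBA) of the
# feature pipeline that get_language_features() and build_models() implement
# above: fixed-vocabulary counts -> TF-IDF -> RandomForestClassifier. The
# vocabulary, documents and labels below are made up for illustration only.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

demo_docs = ["createobject shell run powershell", "msgbox hello world",
             "shell run createobject", "msgbox goodbye world"]
demo_labels = np.array(["malicious", "benign", "malicious", "benign"])
demo_vocab = ["createobject", "shell", "run", "powershell", "msgbox", "hello",
              "world", "goodbye"]

demo_cnt = CountVectorizer(vocabulary=demo_vocab, lowercase=True,
                           decode_error="ignore")
demo_counts = demo_cnt.fit_transform(demo_docs)
demo_tfidf = TfidfTransformer().fit_transform(demo_counts)

demo_clf = RandomForestClassifier(n_estimators=10)
demo_clf.fit(demo_tfidf.toarray(), demo_labels)
print(demo_clf.predict(demo_tfidf.toarray()[:1]))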
from train import make_model_path
import tensorflow as tf
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import seaborn
import pandas as pd
def predict(config, model, batch_data_test, load_data=1):
# load and predict
y_pred_list = []
y_real_list = []
with tf.Session() as sess:
if load_data:
model.restore(make_model_path(config),sess)
print("start prediction")
for ds in batch_data_test:
loss, pred = model.predict(ds, sess)
y_pred_list.append(pred)
y_real_list.append(ds[-1])
# reshape
y_pred = np.asarray(y_pred_list).reshape(-1)
y_test = np.asarray(y_real_list).reshape(-1)
return y_pred, y_test
def evaluate(config, model, batch_data_test, load_data=1):
''' Evaluate predictions against the test data (with plots of the prediction error). '''
y_pred, y_test = predict(config, model, batch_data_test, load_data)
# create the list of difference between prediction and test data
diff = abs(y_pred - y_test)
ratio = abs(y_pred / y_test)
# plot the difference and the threshold (for the test data)
# An estimate of the anomaly fraction of the dataset
outliers_fraction = 0.01
# select the most distant prediction/reality data points as anomalies
diff = | pd.Series(diff) | pandas.Series |
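# Hedged continuation sketch: evaluate() above is truncated here, but the
# outliers_fraction comment suggests flagging the most distant prediction/real
# pairs as anomalies. One plausible way (an assumption, not necessarily the
# original code) is a quantile threshold on the absolute error:
import numpy as np
import pandas as pd

def flag_anomalies(y_pred, y_test, outliers_fraction=0.01):
    # absolute prediction error per point
    diff = pd.Series(np.abs(np.asarray(y_pred) - np.asarray(y_test)))
    threshold = diff.quantile(1 - outliers_fraction)
    return diff > threshold  # boolean Series marking anomalous points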
#!/Tsan/bin/python
# -*- coding: utf-8 -*-
# Libraries to use
from __future__ import division
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import json
import mysql.connector
# Database connection settings for reading data
with open('conf.json', 'r') as fd:
conf = json.load(fd)
src_db = mysql.connector.connect(**conf['src_db'])
# Constants
riskFreeRate = 0.02  # risk-free rate
varThreshold = 0.05  # 5% VaR threshold
scaleParameter = 50  # 50 weeks per year
# Table names
index_data_table = 'fund_weekly_index'  # index time-series data
index_name_table = 'index_id_name_mapping'
type_index_table = 'index_stype_code_mapping'  # maps each fund category to its benchmark index
# Mapping table of private-fund index categories (only needs to run once)
def get_type_index_table(tableName = type_index_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('stype_code',inplace=True)
return pdResult
# Mapping table of private-fund index names and IDs (only needs to run once)
def get_index_table(tableName = index_name_table):
try:
#sql_query='select id,name from student where age > %s'
cursor = src_db .cursor()
sql = "select * from %s" % (tableName)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
#pdResult = dict(result)
pdResult = pd.DataFrame(result)
pdResult = pdResult.dropna(axis=0)
pdResult.columns = [i[0] for i in cursor.description]
pdResult.set_index('index_id',inplace=True)
return pdResult
# Time series of private-fund index net values
def get_index(index, tableName=index_data_table):
try:
# sql_query='select id,name from student where age > %s'
cursor = src_db.cursor()
sql = "select index_id,statistic_date,index_value from %s where index_id = '%s'" % (tableName, index)
cursor.execute(sql)
result = cursor.fetchall()
finally:
pass
pdResult = pd.DataFrame(result, dtype=float)
pdResult.columns = ['index', 'date', 'net_worth']
pdResult = pdResult.drop_duplicates().set_index('date')
pdResult = pdResult.dropna(axis=0)
pdResult = pdResult.fillna(method='ffill')
return pdResult
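# Hedged usage sketch (illustrative only): 'FI01' is a placeholder index_id; replace it with an
# id that actually exists in fund_weekly_index, and make sure the src_db connection is reachable.
#
#   type_map = get_type_index_table()
#   name_map = get_index_table()
#   nav = get_index('FI01')
#   print(nav.tail())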
# Label a date by quarter (returns 'YYYY_Q')
def byseasons(x):
if 1<=x.month<=3:
return str(x.year)+'_'+str(1)
elif 4<= x.month <=6:
return str(x.year)+'_'+str(2)
elif 7<= x.month <=9:
return str(x.year)+'_'+str(3)
else:
return str(x.year)+'_'+str(4)
# Compute the maximum drawdown and its start/end dates
def cal_max_dd_indicator(networthSeries):
maxdd = pd.DataFrame(index = networthSeries.index, data=None, columns =['max_dd','max_dd_start_date','max_dd_end_date'],dtype = float)
maxdd.iloc[0] = 0
maxdd.is_copy = False
for date in networthSeries.index[1:]:
maxdd.loc[date] = [1 - networthSeries.loc[date] / networthSeries.loc[:date].max(),networthSeries.loc[:date].idxmax(),date]
#maxdd[['max_dd_start_date','max_dd_end_date']].loc[date] = [[networthSeries.loc[:date].idxmax(),date]]
#maxdd['max_dd_start_date'].loc[date] = networthSeries.loc[:date].idxmax()
return maxdd['max_dd'].max(), maxdd.loc[maxdd['max_dd'].idxmax]['max_dd_start_date'],maxdd.loc[maxdd['max_dd'].idxmax]['max_dd_end_date']
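# Hedged example (illustrative only): `nav` is assumed to be the frame returned by get_index above.
#
#   max_dd, dd_start, dd_end = cal_max_dd_indicator(nav['net_worth'])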
# Maximum drawdown per quarter; input and output are DataFrames
def cal_maxdd_by_season(df):
seasonList = sorted(list(set(df['season'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['season'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'season'
return maxdd_df
# Maximum drawdown per year; input and output are DataFrames
def cal_maxdd_by_year(df):
seasonList = sorted(list(set(df['year'].values)))
maxdd_dict = {}
for season in seasonList:
temp = df[df['year'] == season]
maxdd_dict[season] = np.round(cal_max_dd_indicator(temp['net_worth'])[0],4)
maxdd_df = pd.DataFrame([maxdd_dict]).T
maxdd_df.columns =[df['index'].iloc[0]]
maxdd_df.index.name = 'year'
return maxdd_df
# Prepare the raw data DataFrame
def get_count_data(cnx):
cursor = cnx.cursor()
sql = "select fund_id,foundation_date,fund_type_strategy from fund_info"
cursor.execute(sql)
result = cursor.fetchall()
df = pd.DataFrame(result)
df.columns = ['fund_id', 'found_date', 'strategy']
sql = "select type_id, strategy from index_type_mapping"
cursor.execute(sql)
result = cursor.fetchall()
meg = pd.DataFrame(result)
meg.columns = ['type_id', 'strategy']
    # Data cleaning
    df = df.dropna()
    df = df[df['strategy'] != u'']
    # Merge with the strategy-to-type mapping table
    df = pd.merge(df, meg)
    # Add a year column
    df['year'] = [str(i.year) for i in df['found_date']]
    # Add a month column
    df['month'] = [str(i.year) + '_' + str(i.month) for i in df['found_date']]
return df.drop('strategy', axis=1)
# Count funds grouped by founding year; returns a DataFrame
def get_ann_fund(df):
    temp = df.groupby(['type_id', 'year'])['fund_id'].count().to_frame()  # grouped counts as a DataFrame
    temp = pd.pivot_table(temp, values='fund_id', index='year', columns=['type_id'])
    temp['Type_0'] = df.groupby(['year'])['fund_id'].count().to_frame()['fund_id']  # add whole-market counts
temp.sort_index(axis=0)
temp.sort_index(axis=1, inplace=True)
return temp
# Count funds grouped by founding month; returns a DataFrame
def get_month_fund(df):
    temp = df.groupby(['type_id', 'month'])['fund_id'].count().to_frame()  # grouped counts as a DataFrame
temp = | pd.pivot_table(temp, values='fund_id', index=['month'], columns=['type_id']) | pandas.pivot_table |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 08:45:40 2019
@author: badat
"""
import os,sys
pwd = os.getcwd()
sys.path.insert(0,pwd)
print('-'*30)
print(os.getcwd())
print('-'*30)
import wikipedia
import pandas as pd
import nltk
import numpy as np
from nltk.tokenize import MWETokenizer
import multiprocessing as mp
import pdb
import pickle
#%%
k = 5
#%%
def load_1k_name():
path = '/home/project_amadeus/mnt/raptor/hbdat/data/MSCOCO_1k/meta/vocab_coco.pkl' #'./data/MSCOCO/vocab_coco.pkl'#
with open(path,'rb') as f:
vocab = pickle.load(f)
return vocab['words'],vocab['poss']
classes,poss = load_1k_name()
classes=np.array(classes)
poss=np.array(poss)
n_classes = len(classes)
#%%
def tokenize(sentence):
words = nltk.word_tokenize(sentence)
words = [word.lower() for word in words]
return words
def get_encoded_word(word):
return tokenizer.tokenize(tokenize(word))[0]
def get_top_relation(keyword):
content=wikipedia.summary(keyword)#wikipedia.page(name).content#
words=tokenizer.tokenize(tokenize(content))
words = [word for word in words if (word in classes_NN)]
fdist = nltk.FreqDist(words)
top_relation = []
active_label = np.zeros(n_classes)
active_label[classes_encode.index(get_encoded_word(keyword))]=1
for word, frequency in fdist.most_common(k):
top_relation.append(word)
active_label[classes_encode.index(word)]=frequency
print(word+' '+str(frequency),end='|')
print()
return top_relation,active_label
#%%
tokenizer = MWETokenizer()
print('create tokenizer')
for idx_c,clss in enumerate(classes):
words=tokenize(clss)
# print(words)
tokenizer.add_mwe(words)
print('Done')
#%%
classes_encode = [get_encoded_word(name) for name in classes]
classes_NN = classes[np.array(poss)=='NN']
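# Hedged usage sketch (illustrative only): 'dog' is a placeholder keyword; it must be one of
# the 1k class names and resolvable by wikipedia.summary (disambiguation errors are not handled).
#
#   top_words, label_vec = get_top_relation('dog')
#   print(top_words)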
#%%
df_summary = | pd.DataFrame() | pandas.DataFrame |
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from timeseries import slice_by_timestamp
from yearly import replace_year
def set_to_begin(values):
"""Set the dates and times in the list to the begin of the month
:param values:
:type values:
:return:
:rtype:
"""
return [pd.Timestamp(v).replace(day=1, hour=0, minute=0, second=0, microsecond=0) for v in values]
def set_to_end(values):
"""Set the dates and times in the list to the end of the month
:param values:
:type values:
:return:
:rtype:
"""
try:
return [pd.Timestamp(v).replace(day=last_day(v), hour=23, minute=59, second=59, microsecond=999999) for v in values]
except TypeError:
return pd.Timestamp(values).replace(day=last_day(values), hour=23, minute=59, second=59, microsecond=999999)
def last_day(dt):
return (pd.Timestamp(dt) + pd.tseries.offsets.MonthEnd(n=0)).day
def is_last_day(dt):
"""Check whether day in ``dt`` is the last day of the month
:param dt: datetime
:type dt: datetime, pd.Timestamp, np.datetime64
:return: True/False
:rtype: bool
"""
return pd.Timestamp(dt).day == last_day(dt)
def increment(dt, months=1, microseconds=0):
"""Increment ``ts`` by ``months``. Default is to increment one month. Return a ``pd.Timestamp``
:param dt: timestamp
:type dt: datetime, pd.Timestamp, np.datetime64
:param months: number of months to increment. Negative values are allowed. Default months = 1
:type months: int
:param microseconds: microseconds to add to the right interval: 0 for closed, -1 for right opened interval
:type microseconds: int
:return: ts incremented by ``months``
:rtype: pd.Timestamp
"""
# Don't use pd.Timedelta:
# pd.Timestamp('2000-12-30 07:30') + pd.Timedelta(1, unit='M') == Timestamp('2001-01-29 17:59:06')
dt = pd.Timestamp(dt)
ts1 = pd.Timestamp(pd.Timestamp(dt).to_pydatetime() + relativedelta(months=months, microseconds=microseconds))
if is_last_day(dt):
return ts1.replace(day=1) + pd.tseries.offsets.MonthEnd(n=1)
else:
return ts1
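# Hedged examples (illustrative only), based on the month-end snapping behaviour above:
#
#   increment(pd.Timestamp('2021-01-31'), months=1)   # -> Timestamp('2021-02-28 00:00:00')
#   increment(pd.Timestamp('2021-01-15'), months=1)   # -> Timestamp('2021-02-15 00:00:00')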
def intervals(indices, months=None, accum=1, start_at=None, closed_left=True, closed_right=True):
"""
:param indices: list of timestamps
:type indices: pd.DatetimeIndex
:param months: months to get intervals
:type months: list
:param accum: number of accumulated months
:type accum: int
:param start_at: date and time to start. Only the day of date will be used, year and month are discarded.
:type start_at: datetime.datetime, str
:param closed_left: left close interval
:type closed_left: bool
:param closed_right: right close interval
:type closed_right: bool
:return: list of intervals [[begin0, end0], [begin1, end1], ..., [beginN, endN]]
:rtype: list of [pd.Timestamp, pd.Timestamp]
"""
if not months:
months = range(1, 13)
if start_at == 'beg':
start_at = datetime.datetime(2000, 1, 1, 0, 0, 0, 0)
elif start_at == 'end':
start_at = indices[0].replace(day=last_day(indices[0])).replace(hour=23, minute=59, second=59,
microsecond=999999)
elif start_at is None:
start_at = indices[0]
tuples = list()
ts0 = replace_year(start_at, indices[0].year)
while ts0 <= indices[-1]:
if ts0.month in months:
tuples.append([ts0, increment(ts0, accum)] if accum > 0 else [increment(ts0, accum), ts0])
ts0 = increment(ts0, 1)
if not closed_right:
tuples = [[ts0, ts1 - pd.Timedelta(1, unit='us')] for ts0, ts1 in tuples]
if not closed_left:
tuples = [[ts0 + pd.Timedelta(1, unit='us'), ts1] for ts0, ts1 in tuples]
return tuples
def series_starting_at(series, rule='sum', months=None, accum=1, start_at=None, closed_left=True, closed_right=False,
label='right', is_sorted=False):
"""Beginning at ``begin``, return the series resampled to the months listed in ``months``,
taking ``accum`` adjacent months. The default resampling rule is ``sum``.
:param series:
:type series: DataFrame, Series
:param rule:
:type rule: DataFrame, Series
:param months: If months is None, all 12 months will be used.
:type months: list, NoneType
:param accum: number of months to accumulate (default 1). It may be also negative.
:type accum: int
:param start_at: datetime, 'beg', or 'end'
:type start_at: datetime.datetime, str
:param closed_left: left close interval
:type closed_left: bool
:param closed_right: right close interval
:type closed_right: bool
:param label:
:type label:
:param is_sorted:
:type is_sorted:
:return:
:rtype: DataFrame, Series
"""
if not is_sorted:
series = series.sort_index()
index0 = series.index[0]
index1 = series.index[-1]
if label == 'right':
index1 += pd.DateOffset(days=1)
tdf = zip(*[[end, getattr(slice_by_timestamp(series, beg, end), rule)()]
for beg, end in intervals(series.index, months=months, accum=accum, start_at=start_at,
closed_left=closed_left, closed_right=closed_right)
if beg >= index0 and end <= index1])
elif label == 'left':
tdf = zip(*[[beg, getattr(slice_by_timestamp(series, beg, end), rule)()]
for beg, end in intervals(series.index, months=months, accum=accum, start_at=start_at,
closed_left=closed_left, closed_right=closed_right)
if beg >= index0 and end <= index1])
else:
assert False
    tdf = list(tdf)  # materialize the zip iterator so it can be indexed under Python 3
    try:
        df = pd.concat(tdf[1][:], axis=1).transpose().set_index(pd.DatetimeIndex(tdf[0]))
        return df
    except TypeError:
        return pd.Series(tdf[1], index=tdf[0])
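# Hedged usage sketch (illustrative only): assumes a daily-indexed series; quarterly sums
# starting at the beginning of each month, keeping only complete 3-month windows.
#
#   idx = pd.date_range('2020-01-01', '2021-12-31', freq='D')
#   sr = pd.Series(range(len(idx)), index=idx, dtype=float)
#   quarterly = series_starting_at(sr, rule='sum', accum=3, start_at='beg')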
# def monthly(series, rule='sum', months=None, accum=1, closed='left', label='right'):
# """
#
# :param series: Series
# :type series: pandas.Series
# :param months: months
# :type months: list
# :param accum: number of months to aggregate
# :type accum: int
# :param closed:
# :type closed:
# :param label:
# :type label:
# :return:
# :rtype:
# """
# if not months:
# result = series.resample(rule=str(accum) + 'M', closed=closed, label=label).sum()
# else:
# result = None
# for m in months:
# if accum > 0:
# srm = monthly(series.loc[(series.index.month >= m) & (series.index.month < m + accum)], accum=accum).dropna(how='all')
# else:
# srm = monthly(series.loc[(series.index.month >= m + accum) & (series.index.month < m)], accum=accum).dropna(how='all')
# result = pd.concat([result, srm])
# if result is not None:
# result = result.sort_index()
# return result
def split_monthly_data_annually(sr, months=range(1, 13), n=1, closed='left', label='right', prefix='M'):
"""Aggregate data monthly according to the number of months, closed and label
:param sr:
:param months:
:param n:
:param closed:
:param label:
:param prefix:
:return:
"""
months = [m for m in months if m in set(sr.index.month)]
df = pd.DataFrame(index=range(min(sr.index.year), max(sr.index.year)+1))
for m in months:
srm = sr.loc[(sr.index.month == m)]
srm.index = srm.index.year
df1 = | pd.DataFrame(index=srm.index) | pandas.DataFrame |
import argparse
import numpy as np
import os
import pandas as pd
import re
import joblib
import json
from sklearn.ensemble import RandomForestRegressor
from sagemaker_training import environment
def parse_args():
"""
Parse arguments.
"""
env = environment.Environment()
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument("--max-depth", type=int, default=10)
parser.add_argument("--n-jobs", type=int, default=env.num_cpus)
parser.add_argument("--n-estimators", type=int, default=120)
# data directories
parser.add_argument("--train", type=str, default=os.environ.get("SM_CHANNEL_TRAIN"))
parser.add_argument("--test", type=str, default=os.environ.get("SM_CHANNEL_TEST"))
# model directory: we will use the default set by SageMaker, /opt/ml/model
parser.add_argument("--model_dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
return parser.parse_known_args()
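# Hedged invocation sketch (illustrative only): inside SageMaker the SM_* environment variables
# are set automatically; for a local dry run the channels can be supplied explicitly, e.g.
#
#   python train_script.py --train ./data/train --test ./data/test --model_dir ./model \
#       --n-estimators 120 --max-depth 10
#
# (train_script.py is a placeholder for however this script is saved)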
def load_dataset(path):
"""
Load entire dataset.
"""
# Take the set of files and read them all into a single pandas dataframe
files = [
os.path.join(path, file) for file in os.listdir(path) if file.endswith("csv")
]
if len(files) == 0:
raise ValueError("Invalid # of files in dir: {}".format(path))
raw_data = [ | pd.read_csv(file, sep=",", header=None) | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from scipy.interpolate import interp1d
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# -------------------------------------------------------------------------------- 5.1 Approximation Demand and Supply
# ---------- Demand and Supply Functions ----------
def demand(p):
"""Vectorized Function to determine *demand*.
Args:
p (np.array): Price vector for demand.
Raises:
ValueError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns demand quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
r = np.random.rand() * 2
n = abs(np.random.randn()) * 2
q = (
40 / (p + n)
+ 1 / (1 + np.exp(p - 75 + r))
+ 2 / (1 + np.exp(p - 50 + r))
+ 3 / (1 + np.exp(p - 25 + r))
)
q[q > 20] = np.nan
assert type(q) == type(p), "Type of output does not equal type of input!"
return q
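# Hedged example (illustrative only): demand() adds random noise, so exact values differ per call,
# and very low prices may return NaN because quantities above 20 are masked.
#
#   p = np.linspace(10.0, 90.0, 5)
#   q = demand(p)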
def supply(p):
"""Vectorized Function to determine *supply.*
Args:
p (np.array): Price vector for supply.
Raises:
        TypeError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns supply quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
q = np.zeros(p.shape)
for i, c in enumerate(p):
if (c > 0) and (c < 10):
q[i] = 1.0
elif (c >= 10) and (c < 20):
q[i] = 1.5
elif (c >= 20) and (c < 25):
q[i] = 3.0
elif (c >= 25) and (c < 35):
q[i] = 3.6
elif (c >= 35) and (c < 45):
q[i] = 4.2
elif (c >= 45) and (c < 60):
q[i] = 5.0
elif (c >= 60) and (c < 75):
q[i] = 8.0
elif (c >= 75) and (c < 85):
q[i] = 12.0
elif (c >= 85) and (c < 90):
q[i] = 16.5
elif (c >= 90) and (c < 95):
q[i] = 18.5
elif c >= 95:
q[i] = 20.0
assert type(q) == type(p), "Type of output does not equals type of input!"
return q
# ---------- Approximation using scipy ----------
class PolynomialDS:
"""Object that approximates supply and demand functions using sicpy
interpolate method.
Args:
a (int): Lower bound of prices.
b (int): Upper bound of prices.
nodes (int): Interpolation nodes for demand and supply.
demand (function): Benchmark function supply.
supply (function): Benchmark function demand.
Raises:
AssertionError: Price must be non-negative.
AssertionError: By Assumption: price cannot exceed 100.
"""
def __init__(self, a, b, nodes, demand, supply):
"""Constructor method.
"""
self.a = a
self.b = b
assert a >= 0, "Price cannot be negative!"
assert (b > a) and (b <= 100), "By Assumption: Price cannot exceed 100!"
self.nodes = nodes
self.demand = demand
self.supply = supply
self.p = np.linspace(a, b, nodes)
self.qd = demand(self.p)
self.qs = supply(self.p)
def __len__(self):
"""Returns number of interpolation nodes.
Returns:
int: Number of known prices.
"""
return len(self.p)
def __repr__(self):
"""String representation of object.
"""
p = np.around(self.p, decimals=2)
qd = np.around(self.qd, decimals=2)
qs = np.around(self.qs, decimals=2)
return f"{len(self)} known values for Demand and Supply:\n\nPrices={p} \n\nDemand={qd} \nSupply={qs}"
def __call__(self, p):
"""Returns true and approximated value of demand and supply for a
given price.
Args:
p (np.array): Price vector.
Returns:
: Comparison.
"""
self.apprx_qd = interp1d(self.p, self.qd)
self.apprx_qs = interp1d(self.p, self.qs)
return f"-- Real value -- at price {p}: \n\nDemand = {self.demand(p)} \nSupply = {self.supply(p)} \n\n-- Approximated value -- at price {p}: \n\nDemand = {self.apprx_qd(p)} \nSupply = {self.apprx_qs(p)}"
@staticmethod
def __name__():
"""Returns the name of the object.
"""
return "Demand and Supply Interpolator"
def plt_approx(self, fs=(14, 7), num1=16.1, num2=16.2, num3=16.3, num4=16.4):
"""Plots Approximation and true supply as well as demand.
Args:
fs (tuple, optional): Figuresize. Defaults to (14, 7).
num1 (float, optional): Number first figure. Defaults to 16.1.
num2 (float, optional): Number second figure. Defaults to 16.2.
num3 (float, optional): Number third figure. Defaults to 16.3.
num4 (float, optional): Number fourth figure. Defaults to 16.4.
"""
prices = np.linspace(self.a, self.b, self.nodes * 150)
apprx_qd = self.apprx_qd(prices)
apprx_qs = self.apprx_qs(prices)
qd = self.demand(prices)
qs = self.supply(prices)
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
ax1[0].plot(self.qd, self.p, "o", label="Nodes Demand", color="#4B045D")
ax1[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax1[0].plot(qd, prices, label="Real Demand", alpha=0.7, color="#D98D08")
ax1[0].set_title(f"Figure {num1}: Approximation of Demand")
ax1[0].legend(loc="center right")
ax1[0].grid()
ax1[1].plot(self.qs, self.p, "o", label="Nodes Supply", color="#4B045D")
ax1[1].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax1[1].plot(qs, prices, label="Real Supply", alpha=0.7, color="#67853E")
ax1[1].set_title(f"Figure {num2}: Approximation of Supply")
ax1[1].legend(loc="center right")
ax1[1].grid()
ax2[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax2[0].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax2[0].set_title(f"Figure {num3}: Approximated Demand and Supply")
ax2[0].legend(loc="center right")
ax2[0].grid()
ax2[1].plot(qd, prices, label="Real Demand", color="#D98D08")
ax2[1].plot(qs, prices, label="Real Supply", color="#67853E")
ax2[1].set_title(f"Figure {num4}: True Demand and Supply")
ax2[1].legend(loc="center right")
ax2[1].grid()
plt.show()
abs_error_qd = np.array(abs(qd - apprx_qd))
abs_error_qd = abs_error_qd[~np.isnan(abs_error_qd)]
abs_error_qs = np.array(abs(qs - apprx_qs))
print(
f"Mean Absolute Error: \n\nDemand = {abs_error_qd.mean():.4f} \nSupply = {abs_error_qs.mean():.4f}"
)
def close_intersection(self, nodes=1000000):
"""Returns true and approximated market equilibrium.
Args:
nodes (int, optional): Number of interpolation nodes. Defaults to 1000000.
"""
prices = np.linspace(self.a, self.b, nodes)
f = lambda p: self.demand(p) - self.supply(p)
abs_sd = f(prices)
abs_sd = abs_sd[~np.isnan(abs_sd)]
argmin = abs(abs_sd).argmin()
pe = prices[argmin]
        qe_demand = np.around(self.demand(np.array([pe])), decimals=3)
        qe_supply = np.around(self.supply(np.array([pe])), decimals=3)
        g = lambda p: self.apprx_qd(p) - self.apprx_qs(p)
        abs_asd = g(prices)  # use the approximated curves for the approximated equilibrium
abs_asd = abs_asd[~np.isnan(abs_asd)]
argmin_a = abs(abs_asd).argmin()
pea = prices[argmin_a]
aqe_demand = np.around(self.apprx_qd(np.array([pea])), decimals=3)
aqe_supply = np.around(self.apprx_qs(np.array([pea])), decimals=3)
print(
f"Equilibrium True (Quantity, Price) \n*** *** *** *** \nDemand: {(qe_demand[0], np.around(pe, decimals=3))} \nSupply: {(qe_supply[0], np.around(pe, decimals=3))}\n"
)
print(
f"Equilibrium Approximation (Quantity, Price) \n*** *** *** *** \nDemand: {(aqe_demand[0], np.around(pea, decimals=3))} \nSupply: {(aqe_supply[0], np.around(pea, decimals=3))}"
)
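# Hedged usage sketch for PolynomialDS (illustrative only): the object must be called once
# (which builds the interp1d approximations) before plt_approx() or close_intersection().
#
#   ds = PolynomialDS(a=1, b=99, nodes=25, demand=demand, supply=supply)
#   print(ds(np.array([50.0])))
#   ds.plt_approx()
#   ds.close_intersection()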
# ---------- Approximation using ML ----------
class AISupplyDemandApprox:
"""Object that approximates supply and demand using various ML methods.
Args:
nodes (int): Number of known nodes.
supply (function): Unknown supply function.
demand (function): Unknown demand function.
a (int, optional): Lower bound of prices. Defaults to 0.
b (int, optional): Upper bound of prices. Defaults to 100.
ts (float, optional): Size of testing data. Defaults to 0.4.
rs (int, optional): Random state. Defaults to 42.
Raises:
AssertionError: Price must be non-negative.
AssertionError: Training data includes nan values.
AssertionError: Testing data includes nan values.
"""
def __init__(self, nodes, supply, demand, a=0, b=100, ts=0.4, rs=42):
"""Constructor method.
"""
assert a >= 0, "Price must be Non Negative!"
p = np.linspace(a, b, nodes)
q = supply(p)
qd = demand(p)
p_train, p_test, q_train, q_test = train_test_split(
p, q, test_size=ts, random_state=rs
)
pd_train, pd_test, qd_train, qd_test = train_test_split(
p, qd, test_size=ts, random_state=rs
)
self.p_train = p_train.reshape(-1, 1) # reshape data
self.p_test = p_test.reshape(-1, 1) # reshape data
self.q_train = q_train.reshape(-1, 1) # reshape data
self.q_test = q_test.reshape(-1, 1) # reshape data
nan_ind = np.argwhere(np.isnan(qd_train)) # select index of nan values
qd_train_mod = np.delete(qd_train, nan_ind) # delete nan index value
pd_train_mod = np.delete(pd_train, nan_ind)
self.pd_train = pd_train_mod.reshape(-1, 1)
self.pd_test = pd_test.reshape(-1, 1)
self.qd_train = qd_train_mod.reshape(-1, 1)
self.qd_test = qd_test.reshape(-1, 1)
assert np.isnan(self.pd_train).all() == False, "There are nan Values!"
assert np.isnan(self.pd_test).all() == False, "There are nan Values!"
@staticmethod
def __name__():
"""Returns name of AISupplyDemandApprox object.
"""
return "Modern-ML Demand and Supply Interpolator"
def plots(
self,
colors=["teal", "yellowgreen", "gold"],
label=["Training Values", "Testing Values"] * 2,
markers=["x", "*", "v"],
n_neighbors=4,
degrees=[3, 6],
weight="distance",
fs=(15, 10),
num1=17.1,
num2=17.2,
num3=17.3,
num4=17.4,
):
"""Plots approximation results as well as training and testing data.
Args:
colors (list, optional): Colors of approximation results. Defaults
to ["teal", "yellowgreen", "gold"].
label (list, optional): Labels of training and testing data.
Defaults to ["Training Values", "Testing Values"]*2.
markers (list, optional): Markers of approximation. Defaults
to ["x", "*", "v"].
n_neighbors (int, optional): Number of k-nearest neighbors. Defaults to 4.
degrees (list, optional): Number of degrees for Linear Regression.
Defaults to [3, 6].
weight (str, optional): Weight of KNN Regression. Defaults to "distance".
fs (tuple, optional): Figuresize. Defaults to (15, 10)
num1 (float, optional): Number of first Figure. Defaults to 17.1.
num2 (float, optional): Number of second Figure. Defaults to 17.2.
num3 (float, optional): Number of third Figure. Defaults to 17.3.
num4 (float, optional): Number of fourth Figure. Defaults to 17.4.
Raises:
AssertionError: Length of degrees is out of range.
"""
self.degrees = degrees
        assert len(degrees) == 2, "degrees must contain exactly two values!"
qsup, psup = [self.q_train, self.q_test], [self.p_train, self.p_test]
qdem, pdem = [self.qd_train, self.qd_test], [self.pd_train, self.pd_test]
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
        for i, (qs, ps, qd, pdm) in enumerate(zip(qsup, psup, qdem, pdem)):  # pdm: avoid shadowing the pandas alias pd
            for ax in [ax1[0], ax1[1]]:
                ax.plot(qs, ps, "o", ms=4, label=label[i])
            for ax in [ax2[0], ax2[1]]:
                ax.plot(qd, pdm, "o", ms=4, label=label[i])
self.maes, self.maed = [], []
self.mses, self.msed = [], []
self.evss, self.evsd = [], []
self.r2s, self.r2d = [], []
for i, ax in enumerate([ax1, ax2]):
for j, d in enumerate(degrees):
model = make_pipeline(PolynomialFeatures(d), LinearRegression())
if i == 0:
model.fit(self.p_train, self.q_train)
pred = model.predict(self.p_test)
ax[i].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.p_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(
f"Figure {num1}: Linear Regression Approximation Supply"
)
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
model.fit(self.pd_train, self.qd_train)
pred = model.predict(self.pd_test)
ax[i - 1].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i - 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i - 1].set_title(
f"Figure {num3}: Linear Regression Approximation Demand"
)
ax[i - 1].grid(True)
ax[i - 1].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
methods = ["KNN", "DecisionTree"]
knn = KNeighborsRegressor(n_neighbors, weights=weight)
tree = DecisionTreeRegressor()
for i, ax in enumerate([ax1, ax2]):
for j, m in enumerate([knn, tree]):
if i == 0:
m.fit(self.p_train, self.q_train)
pred = m.predict(self.p_test)
ax[i + 1].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i + 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i + 1].set_title(
f"Figure {num2}: KNN and DT Approximation Supply"
)
ax[i + 1].grid(True)
ax[i + 1].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
m.fit(self.pd_train, self.qd_train)
pred = m.predict(self.pd_test)
ax[i].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(f"Figure {num4}: KNN and DT Approximation Demand")
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
plt.show()
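    # Hedged usage sketch (illustrative only), relying on the module-level demand/supply above:
    #
    #   ml = AISupplyDemandApprox(nodes=300, supply=supply, demand=demand)
    #   ml.plots()                      # also populates the error metrics
    #   summary = ml.reslts_as_frame()  # tabulates them per method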
def reslts_as_frame(self, num=14):
"""Returns accuracy of approximation using ML.
Args:
num (int, float, optional): Number of dataframe. Defaults to 14.
Returns:
pd.DataFrame: Accuracy of approximation.
"""
d1, d2 = self.degrees[0], self.degrees[1]
index_as_array_sup = [
np.array(["Supply"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
index_as_array_dem = [
np.array(["Demand"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
col = [
"Mean Absolute Error",
"Mean Squared Error",
"Explained Variance Score",
"$R^2$-Score",
]
data_supply = pd.concat(
[
pd.DataFrame(self.maes, index=index_as_array_sup),
pd.DataFrame(self.mses, index=index_as_array_sup),
| pd.DataFrame(self.evss, index=index_as_array_sup) | pandas.DataFrame |
# General
import os
import math
from datetime import datetime, timedelta
import dateutil.parser
import warnings
from numpy.core.numeric import Inf
# Data Science
import pandas as pd
import numpy as np
from scipy.sparse import base
from sklearn import linear_model
from sklearn.metrics import r2_score, mean_absolute_error
from sklearn.model_selection import KFold
import scipy
# Plotting
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from src.visualization import visualize
class Calibration():
def __init__(self, start_time, end_time, data_dir="../../data/", study="utx000", study_suffix="ux_s20", **kwargs):
"""
Initiates the calibration object
Inputs:
- start_time: datetime object with precision to the minute specifying the event START time
- end_time: datetime object with precision to the minute specifying the event END time
- data_dir: path to data directory
- study: string of the study name
- study_suffix: string of the suffix associated with the study
Keyword Arguments:
- resample_rate: integer corresponding to the resample rate in minutes - default is 1 minute
- timestamp: datetime specifying the start time as reported by the laptop
"""
self.set_start_time(start_time)
self.set_end_time(end_time)
if "ref_date" in kwargs.keys():
self.date = kwargs["ref_date"].date().strftime("%m%d%Y")
else:
self.date = end_time.date().strftime("%m%d%Y")
self.data_dir = data_dir
self.study = study
self.suffix = study_suffix
self.set_time_offset(**kwargs)
# kwargs
if "resample_rate" in kwargs.keys():
self.set_resample_rate(kwargs["resample_rate"])
else:
self.set_resample_rate(1) # set to default
if "beacons" in kwargs.keys():
self.set_beacons(kwargs["beacons"])
        else:
            self.set_beacons(list(range(1, 51)))  # assumed default: the full set of 50 study beacons
# data
## beacon
print("IMPORTING BEACON DATA")
if self.study == "utx000":
self.set_utx000_beacon(**kwargs)
else:
self.set_wcwh_beacon(**kwargs)
## refererence
print("IMPORTING REFERENCE DATA")
self.ref = {}
self.set_ref(**kwargs)
## calibration
self.offsets = {}
self.lms = {}
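    # Hedged usage sketch (illustrative only): dates, paths, and the beacon list are placeholders
    # to be adapted to the actual calibration experiment and data layout.
    #
    #   cal = Calibration(start_time=datetime(2021, 6, 1, 12, 0), end_time=datetime(2021, 6, 1, 18, 0),
    #                     data_dir="../../data/", study="utx000", study_suffix="ux_s20",
    #                     beacons=list(range(1, 11)), resample_rate=1)
    #   cal.inspect_timeseries(species="co2")
    #   cal.compare_time_series("co2")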
# experiment detail setters
def set_start_time(self, t):
"""sets the calibration start time"""
self.start_time = t
def set_end_time(self, t):
"""sets the calibration end_time"""
self.end_time = t
def set_resample_rate(self, rate):
"""sets the class resample rate"""
self.resample_rate = rate
def set_time_offset(self, **kwargs):
"""
Sets the offset time for measurements because the laptop time is incorrect
Keyword Arguments:
- timestamp: datetime specifying the start time as reported by the laptop
"""
if "version" in kwargs.keys():
v = kwargs["version"]
else:
v = ""
if "timestamp" in kwargs.keys():
self.t_offset = self.start_time - kwargs["timestamp"]
else:
try:
# attempting to read pm_mass file to get the starting timestamp recorded by the computer
temp = pd.read_csv(f"{self.data_dir}calibration/pm_mass_{self.date}{v}.csv",skiprows=6,parse_dates={"timestamp": ["Date","Start Time"]},infer_datetime_format=True)
self.t_offset = self.start_time - temp["timestamp"].iloc[0]
except FileNotFoundError:
print("No file found - try providing a `timestamp` argument instead")
self.t_offset = 0
def set_beacons(self, beacon_list):
"""sets the list of beacons to be considered"""
self.beacons = beacon_list
# reference setters
def set_ref(self,ref_species=["pm_number","pm_mass","no2","no","co2","tvoc","co","t","rh"],**kwargs):
"""
Sets the reference data
Inputs:
ref_species: list of strings specifying the reference species data to import
"""
for species in ref_species:
if species in ["pm_number", "pm_mass"]:
self.set_pm_ref(species[3:],**kwargs)
elif species == "no2":
self.set_no2_ref(**kwargs)
elif species == "co2":
self.set_co2_ref(**kwargs)
elif species == "no":
self.set_no_ref(**kwargs)
elif species == "tvoc" and len(self.beacon_data) > 1:
self.set_tvoc_ref()
elif species == "t" or species == "rh":
self.set_trh_ref(**kwargs)
else:
self.set_zero_baseline(species=species)
def set_zero_baseline(self,species="co"):
"""
Sets reference of species species to zero (clean) background
Inputs:
- species: string representing the pollutant species to save to the reference dictionary
"""
dts = pd.date_range(self.start_time,self.end_time,freq=f'{self.resample_rate}T') # timestamps between start and end
df = pd.DataFrame(data=np.zeros(len(dts)),index=dts,columns=["concentration"]) # creating dummy dataframe
df.index.rename("timestamp",inplace=True)
self.ref[species] = df
def set_pm_ref(self, concentration_type="mass",**kwargs):
"""
Sets the reference PM data
Inputs:
- concentration_type: string of either "mass" or "number"
Returns a dataframe with columns PM1, PM2.5, and PM10 indexed by timestamp
"""
# import data and correct timestamp
if "version" in kwargs.keys():
v = kwargs["version"]
else:
v = ""
try:
raw_data = pd.read_csv(f"{self.data_dir}calibration/pm_{concentration_type}_{self.date}{v}.csv",skiprows=6)
except FileNotFoundError:
print(f"File not found - {self.data_dir}calibration/pm_{concentration_type}_{self.date}{v}.csv")
return
df = raw_data.drop(['Sample #','Aerodynamic Diameter'],axis=1)
date = df['Date']
sample_time = df['Start Time']
datetimes = []
for i in range(len(date)):
datetimes.append(datetime.strptime(date[i] + ' ' + sample_time[i],'%m/%d/%y %H:%M:%S') + self.t_offset)
df['timestamp'] = datetimes
df.set_index(['timestamp'],inplace=True)
df = df.iloc[:,:54]
df.drop(['Date','Start Time'],axis=1,inplace=True)
# convert all columns to numeric types
for column in df.columns:
df[column] = pd.to_numeric(df[column])
# correct for units
if concentration_type == "mass":
factor = 1000
else:
factor = 1
# sum columns for particular size concentrations
df['pm1'] = df.iloc[:,:10].sum(axis=1)*factor
df['pm2p5'] = df.iloc[:,:23].sum(axis=1)*factor
df['pm10'] = df.iloc[:,:42].sum(axis=1)*factor
# resample
if "window" in kwargs.keys():
window = kwargs["window"]
else:
window = 5 # defaults to window size of 5
df_resampled = df.resample(f"{self.resample_rate}T").mean().rolling(window=window,min_periods=1).mean().bfill()
df_resampled = df_resampled[self.start_time:self.end_time]
# setting
for size in ["pm1","pm2p5","pm10"]:
self.ref[f"{size}_{concentration_type}"] = pd.DataFrame(df_resampled[size]).rename(columns={size:"concentration"})
def set_co2_ref(self,**kwargs):
"""sets the reference CO2 data"""
if "version" in kwargs.keys():
v = kwargs["version"]
else:
v = ""
try:
raw_data = pd.read_csv(f"{self.data_dir}calibration/co2_{self.date}{v}.csv",usecols=[0,1],names=["timestamp","concentration"])
except FileNotFoundError:
print(f"File not found - {self.data_dir}calibration/co2_{self.date}{v}.csv")
return
raw_data["timestamp"] = pd.to_datetime(raw_data["timestamp"],yearfirst=True)
raw_data.set_index("timestamp",inplace=True)
        raw_data.index += self.t_offset  # shift timestamps by the clock-offset correction
if "window" in kwargs.keys():
window = kwargs["window"]
else:
window = 5 # defaults to window size of 5
df = raw_data.resample(f"{self.resample_rate}T",closed="left").mean().rolling(window=window,min_periods=1).mean().bfill()
self.ref["co2"] = df[self.start_time:self.end_time]
def set_trh_ref(self,**kwargs):
"sets the reference temperature and relative humidity"
if "version" in kwargs.keys():
v = kwargs["version"]
else:
v = ""
try:
raw_data = pd.read_csv(f"../data/calibration/trh_{self.date}{v}.csv",skiprows=11,
usecols=["Date","Time","Temp","%RH"],parse_dates=[["Date","Time"]],infer_datetime_format=True)
except FileNotFoundError:
print(f"File not found - {self.data_dir}calibration/trh_{self.date}{v}.csv")
return
raw_data.columns = ["timestamp","t_c","rh"]
raw_data.dropna(inplace=True)
raw_data["timestamp"] = pd.to_datetime(raw_data["timestamp"],yearfirst=False,dayfirst=True)
raw_data.set_index("timestamp",inplace=True)
if "window" in kwargs.keys():
window = kwargs["window"]
else:
window = 3 # defaults to window size of 3
df = raw_data.resample(f"{self.resample_rate}T",closed="left").mean().rolling(window=window,min_periods=1).mean().bfill()
df = df[self.start_time:self.end_time]
df_t = pd.DataFrame(df["t_c"])
df_rh = pd.DataFrame(df["rh"])
# renamining to match other reference data
df_t.columns = ["concentration"]
df_rh.columns = ["concentration"]
# saving to ref dict
self.ref["temperature_c"] = df_t
self.ref["rh"] = df_rh
def set_no2_ref(self,**kwargs):
"""sets the reference NO2 data"""
if "version" in kwargs.keys():
v = kwargs["version"]
else:
v = ""
try:
raw_data = pd.read_csv(f"{self.data_dir}calibration/no2_{self.date}{v}.csv",usecols=["IgorTime","Concentration"])
except FileNotFoundError:
print(f"File not found - {self.data_dir}calibration/no2_{self.date}{v}.csv")
return
# Using igor time (time since Jan 1st, 1904) to get timestamp
ts = []
for seconds in raw_data["IgorTime"]:
ts.append(datetime(1904,1,1) + timedelta(seconds=int(seconds))+self.t_offset)
raw_data["timestamp"] = ts
raw_data.set_index("timestamp",inplace=True)
raw_data.drop("IgorTime",axis=1,inplace=True)
df = raw_data.resample(f"{self.resample_rate}T").mean()
df.columns = ["concentration"]
self.ref["no2"] = df[self.start_time:self.end_time]
def set_no_ref(self,**kwargs):
"""sets the reference no data """
try:
raw_data = pd.read_csv(f"{self.data_dir}calibration/no_{self.date}.csv",names=["timestamp","concentration"],skiprows=1)
except FileNotFoundError:
print(f"File not found - {self.data_dir}calibration/no_{self.date}.csv")
return
raw_data["timestamp"] = pd.to_datetime(raw_data["timestamp"])
raw_data.set_index("timestamp",inplace=True)
df = raw_data.resample(f"{self.resample_rate}T").mean()
self.ref["no"] = df[self.start_time:self.end_time]
def set_tvoc_ref(self):
"""sets the tvoc reference as the mean concentration at each timestamp"""
raw_data = self.beacon_data[["timestamp","tvoc","beacon"]].pivot(index="timestamp",columns="beacon",values="tvoc").dropna(axis=1)
raw_data["concentration"] = raw_data.mean(axis=1)
self.ref["tvoc"] = raw_data[["concentration"]]
# beacon setters
def set_beacon_data(self,data):
"""sets the beacon data attribute with given data"""
self.beacon_data = data
def set_utx000_beacon(self,verbose=False,**kwargs):
"""
Sets beacon data from utx000 for calibration
Inputs:
- beacon_list: list of integers specifying the beacons to consider
- resample_rate: integer specifying the resample rate in minutes
- verbose: boolean to have verbose mode on
"""
self.beacons = kwargs["beacons"]
beacon_data = pd.DataFrame() # dataframe to hold the final set of data
beacons_folder=f"{self.data_dir}raw/{self.study}/beacon"
# list of all beacons used in the study
if verbose:
print('Processing beacon data...\n\tReading for beacon:')
for beacon in self.beacons:
# correcting the number since the values <10 have leading zero in directory
number = f'{beacon:02}'
if verbose:
print(f'\t{number}')
file_count = 0
beacon_folder=f'{beacons_folder}/B{number}'
for file in os.listdir(f'{beacon_folder}/adafruit'):
if file.endswith('.csv'):
file_count += 1
if file_count > 0:
beacon_df = pd.DataFrame() # dataframe specific to the beacon
def import_and_merge(csv_dir,number):
df_list = []
for file in os.listdir(csv_dir+'/'):
try:
# reading in raw data (csv for one day at a time) and appending it to the overal dataframe
day_df = pd.read_csv(f'{csv_dir}/{file}',
index_col='Timestamp',parse_dates=True,
infer_datetime_format=True)
df_list.append(day_df)
except Exception:
# for whatever reason, some files have header issues - these are moved to purgatory to undergo triage
if verbose:
print(f'\t\tIssue encountered while importing {csv_dir}/{file}, skipping...')
if "window" in kwargs.keys():
window = kwargs["window"]
else:
window = 5 # defaults to window size of 5
df = pd.concat(df_list).resample(f'{self.resample_rate}T').mean().rolling(window=window,min_periods=1).mean().bfill()
return df
# Python3 Sensors
# ---------------
py3_df = import_and_merge(f'{beacon_folder}/adafruit', number)
# Changing NO2 readings on beacons without NO2 readings to CO (wiring issues - see Hagen)
if int(number) > 27:
if verbose:
print('\t\tNo NO2 sensor - removing values')
py3_df[['CO','T_CO','RH_CO']] = py3_df[['NO2','T_NO2','RH_NO2']]
py3_df[['NO2','T_NO2','RH_NO2']] = np.nan
py3_df['CO'] /= 1000 # converting ppb measurements to ppm
# Python2 Sensors
# ---------------
py2_df = import_and_merge(f'{beacon_folder}/sensirion', number)
# merging python2 and 3 sensor dataframes
beacon_df = py3_df.merge(right=py2_df,left_index=True,right_index=True,how='outer')
# getting relevant data only
if "start_time" in kwargs.keys():
beacon_df = beacon_df[kwargs["start_time"]:]
else:
beacon_df = beacon_df[self.start_time:self.end_time]
beacon_df.drop(['eCO2','Visible','Infrared',"T_CO","RH_CO","T_NO2","RH_NO2",'Temperature [C]','Relative Humidity','PM_N_0p5','PM_N_4','PM_C_4'],axis=1,inplace=True)
# concatenating the data to the overall dataframe
beacon_df['beacon'] = beacon
beacon_data = pd.concat([beacon_data,beacon_df])
beacon_data.reset_index(inplace=True)
beacon_data.columns = ["timestamp","tvoc","light","no2","co","co2","pm1_number","pm2p5_number","pm10_number","pm1_mass","pm2p5_mass","pm10_mass","beacon"]
beacon_data = beacon_data[beacon_data["beacon"] != 0] # correcting for any mislabeled raw data
self.beacon_data = beacon_data
def set_wcwh_beacon(self, verbose=False, **kwargs):
"""sets beacon data from wcwh pilot for calibration"""
data = pd.DataFrame()
for beacon in self.beacons:
number = f'{beacon:02}'
data_by_beacon = pd.DataFrame()
if verbose:
print("Beacon", beacon)
try:
for file in os.listdir(f"{self.data_dir}raw/{self.study}/beacon/B{number}/DATA/"):
if file[-1] == "v":
if verbose:
print("\t" + file)
try:
temp = pd.read_csv(f"{self.data_dir}raw/{self.study}/beacon/B{number}/DATA/{file}")
if len(temp) > 0:
data_by_beacon = data_by_beacon.append(temp)
except Exception as e:
print("Error with file", file+":", e)
if len(data_by_beacon) > 0:
data_by_beacon["Timestamp"] = pd.to_datetime(data_by_beacon["Timestamp"])
data_by_beacon = data_by_beacon.dropna(subset=["Timestamp"]).set_index("Timestamp").sort_index()[self.start_time:self.end_time].resample(f"{self.resample_rate}T").mean()
data_by_beacon["beacon"] = int(number)
# looking for any moving mean/median filters
if "window" in kwargs.keys():
window = kwargs["window"]
else:
window = 5 # defaults to window size of 5
if "moving" in kwargs.keys():
if kwargs["moving"] == "median":
data = data.append(data_by_beacon.rolling(window=window,min_periods=1).median().bfill())
else:
data = data.append(data_by_beacon.rolling(window=window,min_periods=1).mean().bfill())
else:
data = data.append(data_by_beacon)
except FileNotFoundError:
print(f"No files found for beacon {beacon}.")
data['temperature_c'] = data[['T_CO','T_NO2']].mean(axis=1)
data['rh'] = data[['RH_CO','RH_NO2']].mean(axis=1)
data.drop(["eCO2","Visible","Infrared","Temperature [C]","Relative Humidity","PM_N_0p5","T_CO","T_NO2","RH_CO","RH_NO2"],axis="columns",inplace=True)
data = data[[column for column in data.columns if "4" not in column]]
data.reset_index(inplace=True)
#data.columns = ["timestamp","tvoc","lux","co","no2","pm1_number","pm2p5_number","pm10_number","pm1_mass","pm2p5_mass","pm10_mass","co2","beacon","temperature_c","rh"]
data.rename(columns={"Timestamp":"timestamp","TVOC":"tvoc","Lux":"lux","NO2":"no2","CO":"co","CO2":"co2",
"PM_N_1":"pm1_number","PM_N_2p5":"pm2p5_number","PM_N_10":"pm10_number",
"PM_C_1":"pm1_mass","PM_C_2p5":"pm2p5_mass","PM_C_10":"pm10_mass"},inplace=True)
data["co"] /= 1000
self.beacon_data = data
# beacon getters
def get_beacon(self,bb):
"""gets beacon data"""
return self.beacon_data[self.beacon_data["beacon"] == bb]
# visualizations
def inspect_by_beacon_by_param(self, species="co2"):
"""5x10 subplot showing timeseries of species"""
_, axes = plt.subplots(5,10,figsize=(20,10),sharex="col",gridspec_kw={"hspace":0.1,"wspace":0.3})
for beacon, ax in enumerate(axes.flat):
data_by_beacon = self.beacon_data[self.beacon_data["beacon"] == beacon].set_index("timestamp")
ax.plot(data_by_beacon.index,data_by_beacon[species])
# x-axis
ax.set_xlim(self.start_time, self.end_time)
ax.xaxis.set_visible(False)
# y-axis
if len(data_by_beacon) == 0:
ax.yaxis.set_visible(False)
#remainder
for loc in ["top","right","bottom"]:
ax.spines[loc].set_visible(False)
ax.set_title(beacon,y=1,pad=-6,loc="center",va="bottom")
def inspect_timeseries(self,species="co2",**kwargs):
"""
Plots timeseries of all beacons with an operation timeseries given below
Inputs:
- species: string specifying which ieq parameter to plot
Keyword Arguments:
- ylimits: list of two ints or floats specifying the upper and lower bounds
"""
fig, axes = plt.subplots(2,1,figsize=(20,10),sharex=True,gridspec_kw={"hspace":0})
for beacon in self.beacon_data["beacon"].unique():
data_by_beacon = self.beacon_data[self.beacon_data["beacon"] == beacon].set_index("timestamp")
# timeseries
ax = axes[0]
ax.plot(data_by_beacon.index,data_by_beacon[species],marker=visualize.get_marker(int(beacon)),label=beacon)
ax.set_xlim(left=self.start_time,right=self.end_time)
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%m/%d"))
ax.xaxis.set_minor_locator(mdates.HourLocator(interval=1))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H"))
ax.set_ylabel(visualize.get_pollutant_label(species) + " (" + visualize.get_pollutant_units(species) +")",fontsize=14)
if "ylimits" in kwargs.keys():
ax.set_ylim(kwargs["ylimits"])
ax.legend(title="Beacon",ncol=2,bbox_to_anchor=(1,1),frameon=False,title_fontsize=12,fontsize=10)
# operation
ax = axes[1]
data_by_beacon["op"] = data_by_beacon[species].notna()
ax.scatter(data_by_beacon.index,data_by_beacon["op"]+int(beacon)/50,marker=visualize.get_marker(int(beacon)),s=10,label=beacon)
# x-axis
ax.set_xlim(left=self.start_time,right=self.end_time)
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%m/%d"))
# y-axis
ax.set_ylim([-0.1,2.1])
# legend
ax.legend(title="Beacon",ncol=2,bbox_to_anchor=(1,1),frameon=False,title_fontsize=12,fontsize=10)
plt.show()
plt.close()
def inspect(self,df,timeseries=True):
"""
Visually inspect data in dataframe
Inputs:
        - df: dataframe with one column of values, optionally plus a column named "beacon" holding the beacon number
        - timeseries: boolean; if True plot a time series, otherwise plot a heatmap
"""
if timeseries:
_, ax = plt.subplots(figsize=(16,6))
if "beacon" in df.columns:
for bb in df["beacon"].unique():
df_by_bb = df[df["beacon"] == bb]
ax.plot(df_by_bb.index,df_by_bb.iloc[:,0].values,marker=self.get_marker(int(bb)),label=bb)
else:
ax.plot(df.index,df.iloc[:,0].values,linewidth=3,color="black",label="Ref")
ax.legend(bbox_to_anchor=(1,1),frameon=False,ncol=2)
plt.show()
plt.close()
else: #heatmap
_, ax = plt.subplots(figsize=(14,7))
if "beacon" in df.columns:
df.columns=["concentration","beacon"]
df_to_plot = pd.DataFrame()
for bb in df["beacon"].unique():
df_by_bb = df[df["beacon"] == bb]
df_by_bb.drop("beacon",axis=1,inplace=True)
df_to_plot = pd.concat([df_to_plot,df_by_bb],axis=1)
df_to_plot.rename(columns={"concentration":bb}, inplace=True)
sns.heatmap(df_to_plot.T,vmin=np.nanmin(df_to_plot),vmax=np.nanmax(df_to_plot),ax=ax)
locs, labels = plt.xticks()
new_labels = []
for label in labels:
new_labels.append(dateutil.parser.isoparse(label.get_text()).strftime("%m-%d-%y %H:%M"))
plt.xticks(locs,new_labels,rotation=-45,ha="left")
plt.yticks(rotation=0,va="center")
else:
sns.heatmap(df.T,vmin=np.nanmin(df),vmax=np.nanmax(df),ax=ax)
# visuals
def compare_time_series(self,species,**kwargs):
"""
Plots reference and beacon data as a time series
Inputs:
- species: string specifying which ieq parameter to plot
Keyword Arguments:
- ax:
- beacons:
- data:
"""
if "ax" in kwargs.keys():
ax = kwargs["ax"]
else:
_, ax = plt.subplots(figsize=(17,6))
# plotting reference
ax.plot(self.ref[species].index,self.ref[species].iloc[:,0].values,linewidth=3,color="black",zorder=100,label="Reference")
# plotting beacon data
if "beacons" in kwargs.keys():
beacon_list = kwargs["beacons"]
else:
beacon_list = self.beacon_data["beacon"].unique()
for bb in beacon_list:
if "data" in kwargs.keys():
data_by_bb = kwargs["data"]
else:
data_by_bb = self.beacon_data[self.beacon_data["beacon"] == bb].set_index("timestamp")
data_by_bb.dropna(subset=[species],inplace=True)
if len(data_by_bb) > 0:
ax.plot(data_by_bb.index,data_by_bb[species],marker=visualize.get_marker(int(bb)),zorder=int(bb),label=bb)
# x-axis
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H'))
ax.xaxis.set_major_locator(mdates.HourLocator())
ax.xaxis.set_minor_locator(mdates.MinuteLocator(byminute=range(10,60,10)))
ax.xaxis.set_minor_formatter(mdates.DateFormatter('%M'))
ax.set_xlim([self.start_time,self.end_time])
# y_axis
ax.set_ylabel("Concentration",fontsize=14)
# remainder
ax.tick_params(axis="both",labelsize=12)
for loc in ["top","right"]:
ax.spines[loc].set_visible(False)
ax.legend(loc="upper center", bbox_to_anchor=(0.5,-0.05),frameon=False,title="Device",title_fontsize=12,fontsize=10,ncol=25)
if "ax" in kwargs.keys():
return ax
plt.show()
plt.close()
def compare_scatter(self,species,**kwargs):
"""
Scatter of measured points between beacons and reference
Inputs:
Keyword Arguments:
- ax:
- beacons:
- data:
- min_val:
"""
if "beacons" in kwargs.keys():
beacon_list = kwargs["beacons"]
else:
beacon_list = self.beacon_data["beacon"].unique()
for bb in beacon_list:
# getting data to plot
if "data" in kwargs.keys():
data_by_bb = kwargs["data"]
else:
data_by_bb = self.beacon_data[self.beacon_data["beacon"] == bb].set_index("timestamp")
comb = data_by_bb.merge(right=self.ref[species],left_index=True,right_index=True)
if "ax" in kwargs.keys():
ax = kwargs["ax"]
else:
_, ax = plt.subplots(figsize=(8,8))
im = ax.scatter(comb["concentration"],comb[species],c=comb.index,cmap="Blues",edgecolor="black",s=50,label="Measured",zorder=2)
#fig.colorbar(im,ax=ax,label="Minutes since Start")
max_val = max(np.nanmax(comb["concentration"]),np.nanmax(comb[species]))
if "min_val" in kwargs.keys():
min_val = kwargs["min_val"]
else:
min_val = 0
# 1:1
ax.plot([min_val,max_val],[min_val,max_val],color="firebrick",linewidth=2,zorder=1)
# x-axis
ax.set_xlabel("Reference Measurement",fontsize=14)
ax.set_xlim(left=min_val,right=max_val)
# y-axis
ax.set_ylabel("Beacon Measurement",fontsize=14)
ax.set_ylim(bottom=min_val,top=max_val)
# remainder
ax.tick_params(axis="both",labelsize=12)
ax.set_title(bb)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if "ax" in kwargs.keys():
return ax
plt.show()
plt.close()
def show_linear_correction(self,species,**kwargs):
"""plots the original and corrected data against the reference for the given species"""
for bb in self.beacon_data["beacon"].unique():
beacon_by_bb = self.beacon_data[self.beacon_data["beacon"] == bb].set_index("timestamp")
#beacon_by_bb = self.apply_laplacion_filter(beacon_by_bb,species)
try:
_, axes = plt.subplots(1,4,figsize=(26,6),gridspec_kw={"wspace":0.2,"width_ratios":[0.25,0.25,0.25,0.25]})
self.compare_time_series(species,ax=axes[0],beacons=[bb])
# original data - scatter
self.compare_scatter(species,ax=axes[1],beacons=[bb],**kwargs)
# corrected data - timeseries
corrected_by_bb = beacon_by_bb.copy()
corrected_by_bb[species] = beacon_by_bb[species] * self.lms[species].loc[bb,"coefficient"] + self.lms[species].loc[bb,"constant"]
corrected_by_bb = corrected_by_bb.shift(self.lms[species].loc[bb,"ts_shift"])[:len(self.ref[species])]
self.compare_time_series(species,ax=axes[2],beacons=[bb],data=corrected_by_bb)
# corrected data - scatter
self.compare_scatter(species,ax=axes[3],beacons=[bb],data=corrected_by_bb,**kwargs)
plt.show()
plt.close()
except ValueError as e:
print(e)
print(f"Length of data for Beacon {bb} is {len(beacon_by_bb[species].dropna())}")
except KeyError as e:
print(e)
print("No data for beacon", bb)
def show_comprehensive_calibration(self,**kwargs):
"""shows the three figure panel of the calibration"""
for bb in self.beacon_data["beacon"].unique():
beacon_by_bb = self.beacon_data[self.beacon_data["beacon"] == bb].set_index("timestamp")
if "start_time" in kwargs.keys():
beacon_by_bb = beacon_by_bb[kwargs["start_time"]:]
if "end_time" in kwargs.keys():
beacon_by_bb = beacon_by_bb[:kwargs["end_time"]]
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(2, 2)
# top timeseries figure
ts = fig.add_subplot(gs[0,:])
# bottom left correlation plot
corr = fig.add_subplot(gs[1,0])
# bottom right difference plot
            diff = fig.add_subplot(gs[1,1])  # note: the ts/corr/diff panels are placeholders; population of this figure is not implemented yet
def show_comprehensive_linear_corr(self,species,r,c,**kwargs):
"""shows a subplot of all the correlation beacons"""
fig, axes = plt.subplots(r,c,figsize=(c*4,r*4),sharex=True,sharey=True)
for bb, ax in zip(self.beacons,axes.flat):
beacon_by_bb = self.beacon_data[self.beacon_data["beacon"] == bb].set_index("timestamp")
corrected_by_bb = beacon_by_bb.copy()
corrected_by_bb[species] = beacon_by_bb[species] * self.lms[species].loc[bb,"coefficient"] + self.lms[species].loc[bb,"constant"]
corrected_by_bb = corrected_by_bb.shift(self.lms[species].loc[bb,"ts_shift"])[:len(self.ref[species])]
ax.scatter(self.ref[species]["concentration"],corrected_by_bb[species],color="black",zorder=2)
max_val = max(np.nanmax(self.ref[species]["concentration"]),np.nanmax(corrected_by_bb[species]))
if "min_val" in kwargs.keys():
min_val = kwargs["min_val"]
else:
min_val = 0
# 1:1
ax.plot([min_val,max_val],[min_val,max_val],color="firebrick",linewidth=2,zorder=1)
# axis
# annotating
lm_bb = self.lms[species][self.lms[species].index == bb]
r2 = self.lms[species]
ax.set_title(f" Device {bb}\n r$^2$ = {round(lm_bb['score'].values[0],3)}\n y = {round(lm_bb['coefficient'].values[0],1)}x + {round(lm_bb['constant'].values[0],1)}",
y=0.85,pad=0,fontsize=13,loc="left",ha="left")
ax.axis('off')
axes[r-1,0].axis('on')
for loc in ["top","right"]:
axes[r-1,0].spines[loc].set_visible(False)
plt.setp(axes[r-1,0].get_xticklabels(), ha="center", rotation=0, fontsize=16)
plt.setp(axes[r-1,0].get_yticklabels(), ha="right", rotation=0, fontsize=16)
axes[1,0].text(-1,7.5,f'BEVO Beacon {visualize.get_pollutant_label(species)} ({visualize.get_pollutant_units(species)})',rotation=90,ha='center',va='center',fontsize=18)
axes[r-1,3].text(7.5,1,f'Reference {visualize.get_pollutant_label(species)} ({visualize.get_pollutant_units(species)})',ha='center',va='top',fontsize=18)
plt.show()
plt.close()
def show_step_offset(self,species="co",base_vals=[0,1,2,4],step_length=2):
"""
Visualizes results from step calibration offset
Parameters
----------
species : str
Variable of interest
base_vals : list of int/float, default [0,1,2,4]
List of base values at each step
step_length : int or float, default 2
Length of each step in the experiment in hours
Returns
-------
<void>
"""
ref_index = pd.date_range(start=self.beacon_data["timestamp"].iloc[0],
end=self.beacon_data["timestamp"].iloc[0]+timedelta(hours=step_length*len(base_vals)),
freq=f"{self.resample_rate}T",closed="right")
ref_vals = []
for val in base_vals:
ref_vals += [val]*int(step_length*60/self.resample_rate)
_, ax = plt.subplots(figsize=(16,4))
ax.plot(ref_index,ref_vals,lw=2,color="black",label="Reference")
for bb in self.beacon_data["beacon"].unique():
data_bb = self.beacon_data[self.beacon_data["beacon"] == bb]
offset_val = self.offsets[species].loc[bb,"constant"]
ax.plot(data_bb["timestamp"],data_bb[species]-offset_val,lw=1,marker=visualize.get_marker(int(bb)),zorder=int(bb),label=bb)
for loc in ["top","right"]:
ax.spines[loc].set_visible(False)
ax.legend()
plt.show()
plt.close()
#return pd.DataFrame(data=ref_vals,index=ref_index,columns=["concentration"])
def show_step_linear(self,species="co",base_vals=[0,1,2,4],step_length=2):
"""
Shows the results from the linear correction on the step calibration
"""
ref_index = pd.date_range(start=self.beacon_data["timestamp"].iloc[0],
end=self.beacon_data["timestamp"].iloc[0]+timedelta(hours=step_length*len(base_vals)),
freq=f"{self.resample_rate}T",closed="right")
ref_vals = []
for val in base_vals:
ref_vals += [val]*int(step_length*60/self.resample_rate)
_, ax = plt.subplots(figsize=(16,4))
ax.plot(ref_index,ref_vals,lw=2,color="black",label="Reference")
for bb in self.beacon_data["beacon"].unique():
data_bb = self.beacon_data[self.beacon_data["beacon"] == bb]
y = data_bb[species] * self.lms[species].loc[bb,"coefficient"] + self.lms[species].loc[bb,"constant"]
ax.plot(data_bb["timestamp"],y,lw=1,marker=visualize.get_marker(int(bb)),zorder=int(bb),label=bb)
for loc in ["top","right"]:
ax.spines[loc].set_visible(False)
ax.legend()
plt.show()
plt.close()
def show_comprehensive_ts(self,species,r,c,beacons_to_exclude=[],save=False,**kwargs):
"""Plots comprehensive time series of the species against the min and max values"""
data = self.beacon_data[~self.beacon_data["beacon"].isin(beacons_to_exclude)]
temp = data[["timestamp",species,"beacon"]].pivot(index="timestamp",columns="beacon",values=species)#.dropna(axis=1)
for col in temp.columns:
offset = self.offsets[species][self.offsets[species].index == col]
temp[col] += offset["constant"].values
temp["mean"] = temp.mean(axis=1)
temp["min"] = temp.min(axis=1)
temp["max"] = temp.max(axis=1)
temp["t"] = (temp.index - temp.index[0]).total_seconds()/60
fig, axes = plt.subplots(r,c,figsize=(c*4,r*4),sharex=True,sharey=True)
for bb, ax in zip(self.beacons,axes.flat):
try:
ax.plot(temp["t"],temp[bb],color="black",linewidth=2,zorder=2)
ax.fill_between(temp["t"],temp["min"],temp["max"],alpha=0.5,color="grey",zorder=1)
except KeyError:
pass
ax.set_title(f" Device {int(bb)}",y=0.85,pad=0,fontsize=13,loc="left",ha="left")
ax.axis("off")
if "limits" in kwargs.keys():
#ax.set_xlim(kwargs["limits"])
ax.set_ylim(kwargs["limits"])
axes[r-1,0].axis('on')
for loc in ["top","right"]:
axes[r-1,0].spines[loc].set_visible(False)
ax.set_xticks(np.arange(0,125,30))
plt.setp(axes[r-1,0].get_xticklabels(), ha="center", rotation=0, fontsize=16)
plt.setp(axes[r-1,0].get_yticklabels(), ha="right", rotation=0, fontsize=16)
axes[1,0].text(-2,7.5,f"Concentration ({visualize.get_pollutant_units(species)})",rotation=90,ha='right',va='center',fontsize=24)
axes[r-1,int(c/2)].text(7.5,-2,f'Experiment Time (minutes)',ha='center',va='top',fontsize=24)
if save:
if "study" in kwargs.keys():
study = "-"+kwargs["study"]
else:
study = ""
plt.savefig(f"../reports/figures/beacon_summary/calibration-{species}-comprehensive_ts{study}.pdf",bbox_inches="tight")
plt.show()
plt.close()
# deprecated
def compare_histogram(self,ref_data,beacon_data,bins):
"""
Plots reference and beacon data as histograms
Inputs:
- ref_data: dataframe of reference data with single column corresponding to data indexed by time
- beacon_data: dataframe of beacon data with two columns corresponding to data and beacon number indexed by time
"""
_, ax = plt.subplots(10,5,figsize=(30,15),gridspec_kw={"hspace":0.5})
for i, axes in enumerate(ax.flat):
# getting relevant data
beacon_df = beacon_data[beacon_data["beacon"] == i]
beacon_df.dropna(inplace=True)
if len(beacon_df) > 1:
# reference data
axes.hist(ref_data.iloc[:,0].values,bins=bins,color="black",zorder=1)
# beacon data
axes.hist(beacon_df.iloc[:,0].values,bins=bins,color="white",edgecolor="black",alpha=0.7,zorder=9)
axes.set_title(f"B{i}",pad=0)
# x-axis
axes.set_xticks(bins)
# remainder
for spine in ["top","right"]:
axes.spines[spine].set_visible(False)
else:
# making it easier to read by removing the unused figures
axes.set_xticks([])
axes.set_yticks([])
for spine in ["top","right","bottom","left"]:
axes.spines[spine].set_visible(False)
plt.show()
plt.close()
# diagnostics
def get_reporting_beacons(self,species):
"""gets the list of beacons that report measurements from the specified sensor"""
var_only = self.beacon_data[[species,"beacon"]]
reporting_beacons = []
for bb in var_only["beacon"].unique():
df = var_only[var_only["beacon"] == bb].dropna(subset=[species])
if len(df) > 2:
reporting_beacons.append(bb)
try:
if species.lower() == "no2":
possible_beacons = [x for x in self.beacons if x <= 28] # getting no2 sensing beacons only
missing_beacons = [x for x in possible_beacons if x not in reporting_beacons]
else:
missing_beacons = [x for x in self.beacons if x not in reporting_beacons]
print(f"Missing data from: {missing_beacons}")
except AttributeError:
print("Calibration object has no attribute 'beacons' - run 'set_beacons' with the desired beacon list")
return [], reporting_beacons
return missing_beacons, reporting_beacons
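# Illustrative usage sketch (the instance name `cal` is assumed, not from the source):
#     missing, reporting = cal.get_reporting_beacons("co2")
#     print(f"{len(reporting)} devices report co2; missing: {missing}")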
# calibration
def offset(self,species,baseline=0,save_to_file=False,show_corrected=False):
"""
Gets the average offset value and standard deviation between the beacon and reference measurement
Inputs:
- species: string specifying the variable of interest
Returns dataframe holding the average difference and standard deviation between the differences
"""
offsets = {"beacon":[],"mean_difference":[],"value_to_baseline":[],"constant":[]}
ref_df = self.ref[species]
for beacon in np.arange(1,51):
offsets["beacon"].append(beacon)
# getting relevant data
beacon_df = self.beacon_data[self.beacon_data["beacon"] == beacon].set_index("timestamp")
beacon_df.dropna(subset=[species],inplace=True)
if len(beacon_df) > 1:
if len(ref_df) != len(beacon_df):
# resizing arrays to include data from both modalities
max_start_date = max(ref_df.index[0],beacon_df.index[0])
min_end_date = min(ref_df.index[-1],beacon_df.index[-1])
ref_df = ref_df[max_start_date:min_end_date]
beacon_df = beacon_df[max_start_date:min_end_date]
print(f"Beacon {beacon}: Reference and beacon data are not the same length")
# merging beacon and reference data to get difference
for shift in range(4):
temp_beacon = beacon_df.copy()
temp_beacon.index += timedelta(minutes=shift)
df = pd.merge(left=temp_beacon,left_index=True,right=ref_df,right_index=True,how="inner")
if len(df) > 1:
beacon_df.index += timedelta(minutes=shift)
break
df["delta"] = df[species] - df["concentration"]
# adding data
mean_delta = np.nanmean(df["delta"])
val_to_base = np.nanmin(df[species]) - baseline
offsets["mean_difference"].append(mean_delta)
offsets["value_to_baseline"].append(val_to_base)
if np.nanmin(df[species]) - mean_delta < baseline:
offsets["constant"].append((np.nanmin(df[species]) - baseline)*-1)
else:
offsets["constant"].append(mean_delta*1)
else:
# adding zeros
offsets["mean_difference"].append(0)
offsets["value_to_baseline"].append(0)
offsets["constant"].append(0)
offset_df = pd.DataFrame(data=offsets)
offset_df.set_index("beacon",inplace=True)
self.offsets[species] = offset_df
if save_to_file:
self.save_offsets(species)
if show_corrected:
# Plotting Corrected Timeseries by beacon
# ---------------------------------------
fig, ax = plt.subplots(10,5,figsize=(30,15),sharex=True)
for i, axes in enumerate(ax.flat):
# getting relevant data
beacon_df = self.beacon_data[self.beacon_data["beacon"] == i]
beacon_df.dropna(subset=[species],inplace=True)
if len(beacon_df) > 1:
axes.plot(ref_df.index,ref_df["concentration"],color="black")
beacon_df[species] -= offset_df.loc[i,"constant"]
axes.plot(beacon_df.index,beacon_df[species],color="seagreen")
axes.set_title(f"beacon {i}")
for spine in ["top","right","bottom"]:
axes.spines[spine].set_visible(False)
else:
# making it easier to read by removing the unused figures
axes.set_xticks([])
axes.set_yticks([])
for spine in ["top","right","bottom","left"]:
axes.spines[spine].set_visible(False)
plt.subplots_adjust(hspace=0.5)
plt.show()
plt.close()
# Plotting Corrected Timeseries over Entire Calibration
# -----------------------------------------------------
fig, ax = plt.subplots(figsize=(16,6))
for bb in self.beacon_data["beacon"].unique():
beacon_df = self.beacon_data[self.beacon_data["beacon"] == bb]
beacon_df.dropna(subset=[species],inplace=True)
if len(beacon_df) > 1:
beacon_df[species] -= offset_df.loc[bb,"constant"]
ax.plot(beacon_df.index,beacon_df[species],marker=self.get_marker(int(bb)),zorder=int(bb),label=bb)
for spine in ["top","right"]:
ax.spines[spine].set_visible(False)
ax.plot(ref_df.index,ref_df["concentration"],color="black",zorder=99)
ax.legend(bbox_to_anchor=(1,1),frameon=False,ncol=2)
plt.show()
plt.close()
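# Illustrative usage sketch (instance name `cal` and the co2/400 baseline are assumed):
#     cal.offset("co2", baseline=400, save_to_file=True)
#     print(cal.offsets["co2"][["mean_difference", "constant"]].head())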
def step_calibration_offset(self,species="co",base_vals=[0,1,2,4],step_length=2,trim=0.25):
"""
Gets offset values based on step calibration
Parameters
----------
species : str
Variable of interest
base_vals : list of int/float, default [0,1,2,4]
List of base values at each step
step_length : int or float, default 2
Length of each step in the experiment in hours
trim : float, default 0.25
Fraction of step_length to trim from beginning and end
Returns
-------
offsets : dict
List of offsets corresponding to each reference base level
"""
offsets = {base: [] for base in base_vals}
offsets["beacon"] = []
for bb in self.beacon_data["beacon"].unique():
offsets["beacon"].append(bb)
data_bb = self.beacon_data[self.beacon_data["beacon"] == bb]
data_bb.set_index("timestamp",inplace=True)
start_time = data_bb.index[0]
for step in range(len(base_vals)):
step_start = start_time+timedelta(hours=step_length*(step))+timedelta(hours=step_length*trim)
step_end = start_time+timedelta(hours=step_length*(step+1))-timedelta(hours=step_length*trim)
data_bb_step = data_bb[step_start:step_end]
offsets[base_vals[step]].append(np.nanmean(data_bb_step[species]) - base_vals[step])
exp_offsets = pd.DataFrame(offsets)
exp_offsets.set_index("beacon",inplace=True)
temp_offsets = {"beacon":[],"mean_difference":[],"value_to_baseline":[],"constant":[]}
for key,val in zip(temp_offsets.keys(),[offsets["beacon"],exp_offsets.mean(axis=1).values,exp_offsets.mean(axis=1).values,exp_offsets.mean(axis=1).values]):
temp_offsets[key] = val
self.offsets[species] = pd.DataFrame(temp_offsets).set_index("beacon")
return exp_offsets
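# Illustrative usage sketch (instance name `cal` is assumed): for a CO step test
# holding 0, 1, 2, and 4 ppm for two hours each,
#     per_step = cal.step_calibration_offset(species="co", base_vals=[0, 1, 2, 4], step_length=2)
#     print(per_step.mean(axis=1))  # one averaged constant per beacon, also stored in cal.offsets["co"]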
def step_calibration_linear(self,species="co",base_vals=[0,1,2,4],step_length=2,trim=0.25):
"""
Gets linear fit from step calibration
Parameters
----------
species : str
Variable of interest
base_vals : list of int/float, default [0,1,2,4]
List of base values at each step
step_length : int or float, default 2
Length of each step in the experiment in hours
trim : float, default 0.25
Fraction of step_length to trim from beginning and end
Returns
-------
params : dict
List of offsets corresponding to each reference base level
"""
n = len(self.beacon_data["beacon"].unique())
_, axes = plt.subplots(1,n,figsize=(4*n,4))
coeffs = {"beacon":[],"constant":[],"coefficient":[],"score":[],"ts_shift":[]}
for bb,ax in zip(self.beacon_data["beacon"].unique(),axes.flat):
coeffs["beacon"].append(bb)
data_bb = self.beacon_data[self.beacon_data["beacon"] == bb]
data_bb.set_index("timestamp",inplace=True)
start_time = data_bb.index[0]
x = []
for step in range(len(base_vals)):
step_start = start_time+timedelta(hours=step_length*(step))+timedelta(hours=step_length*trim)
step_end = start_time+timedelta(hours=step_length*(step+1))-timedelta(hours=step_length*trim)
data_bb_step = data_bb[step_start:step_end]
x.append(np.nanmean(data_bb_step[species]))
x = np.array(x)
regr = linear_model.LinearRegression()
regr.fit(x.reshape(-1, 1), base_vals)
for param, label in zip([regr.intercept_, regr.coef_[0], regr.score(x.reshape(-1, 1),base_vals),0], ["constant","coefficient","score","ts_shift"]):
coeffs[label].append(param)
ax.scatter(base_vals,x,color="black",s=10)
x_vals = np.linspace(0,max(base_vals),100)
# draw the fitted relationship in the same coordinates as the scatter (reference on x, raw reading on y)
ax.plot(x_vals,(x_vals-regr.intercept_)/regr.coef_[0],color="firebrick",lw=2)
plt.show()
plt.close()
coeff_df = pd.DataFrame(coeffs)
coeff_df.set_index("beacon",inplace=True)
self.lms[species] = coeff_df
return coeffs
def get_linear_model_params(self,df,x_label,y_label,**kwargs):
"""runs linear regression and returns intercept, slope, r2, and mae"""
x = df.loc[:,x_label].values
y = df.loc[:,y_label].values
regr = linear_model.LinearRegression()
try:
if "weights" in kwargs.keys():
weights = kwargs["weights"]
else:
weights= None
regr.fit(x.reshape(-1, 1), y, sample_weight=weights)
y_pred = regr.intercept_ + x * regr.coef_[0]
return regr.intercept_, regr.coef_[0], regr.score(x.reshape(-1, 1),y), mean_absolute_error(y_true=y,y_pred=y_pred)
except ValueError as e:
print(f"Error with data ({e}) - returning (0,1)")
return 0, 1, 0, np.nan
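# Minimal sketch of the helper above (column names are assumed for illustration):
#     df_fit = pd.DataFrame({"beacon": [0.1, 1.2, 2.1, 4.3], "reference": [0, 1, 2, 4]})
#     b, m, r2, mae = cal.get_linear_model_params(df_fit, "beacon", "reference")
# which fits reference = b + m * beacon and reports the r-squared and mean absolute error.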
def apply_laplacion_filter(self,data,var,threshold=0.25):
"""applies laplacian filter to data and returns values with threshold limits"""
lap = scipy.ndimage.filters.laplace(data[var])
lap /= np.max(lap)
# filtering out high variability
data["lap"] = lap
data_filtered = data[(data["lap"] < threshold) & (data["lap"] > -1*threshold)]
data_filtered.drop("lap",axis="columns",inplace=True)
return data_filtered
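# Note on the filter above: the discrete Laplacian of a smooth ramp is near zero
# while abrupt jumps produce large magnitudes, so after normalizing by the maximum,
# rows with |lap| >= threshold (default 0.25) are treated as high-variability
# samples and dropped before any model fitting.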
def linear_regression(self,species,weight=False,save_to_file=False,verbose=False,**kwargs):
"""generates a linear regression model"""
coeffs = {"beacon":[],"constant":[],"coefficient":[],"score":[],"mae":[],"ts_shift":[]}
ref_df = self.ref[species]
data = self.beacon_data[["timestamp",species,"beacon"]]
for bb in np.arange(1,51):
beacon_by_bb = data[data["beacon"] == bb].set_index("timestamp")
if verbose:
print(f"Working for Beacon {bb}")
print(beacon_by_bb.head())
if len(beacon_by_bb) > 1:
if len(ref_df) != len(beacon_by_bb):
# resizing arrays to include data from both modalities
max_start_date = max(ref_df.index[0],beacon_by_bb.index[0])
min_end_date = min(ref_df.index[-1],beacon_by_bb.index[-1])
ref_df = ref_df[max_start_date:min_end_date]
beacon_by_bb = beacon_by_bb[max_start_date:min_end_date]
print(f"Beacon {bb}: Reference and beacon data are not the same length")
beacon_by_bb.drop(["beacon"],axis=1,inplace=True)
# applying laplacion filter
if "lap_filter" in kwargs.keys():
if kwargs["lap_filter"] == True:
beacon_by_bb = self.apply_laplacion_filter(beacon_by_bb,species)
# running linear models with shifted timestamps
best_params = [-math.inf,-math.inf,-math.inf, -math.inf] # b, m, r2, ts_shift
for ts_shift in range(-3,4):
comb = ref_df.merge(right=beacon_by_bb.shift(ts_shift),left_index=True,right_index=True,how="inner")
comb.dropna(inplace=True)
if "event" in kwargs.keys():
event = kwargs["event"]
data_before_event = comb[:event]
baseline = np.nanmean(data_before_event[species])
data_before_event = data_before_event[data_before_event[species] > baseline-np.nanstd(data_before_event[species])]
data_after_event = comb[event:]
data_after_event = data_after_event[data_after_event[species] > baseline+2*np.nanstd(data_before_event[species])]
comb = | pd.concat([data_before_event,data_after_event]) | pandas.concat |
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator
def modified_fillna(arr, fillers):
ser = | pd.Series(arr) | pandas.Series |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import is_bool_indexer
from pandas.core.indexing import check_bool_indexer
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from pandas.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_pandas, wrap_udf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _get_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_map(func_name):
def str_op_builder(df, *args, **kwargs):
str_s = df.squeeze(axis=1).str
return getattr(pandas.Series.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
def _dt_prop_map(property_name):
"""
Create a function that call property of property `dt` of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze(axis=1).dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
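# Minimal sketch of how the builder is meant to be applied (one-column partition assumed):
#     hour_of = _dt_prop_map("hour")
#     part = pandas.DataFrame({"ts": pandas.date_range("2021-01-01", periods=3, freq="H")})
#     hour_of(part)  # -> one-column DataFrame holding [0, 1, 2]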
def _dt_func_map(func_name):
"""
Create a function that call method of property `dt` of the series.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies callable methods of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
dt_s = df.squeeze(axis=1).dt
return pandas.DataFrame(
getattr(pandas.Series.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
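# Sketch of why the copy matters (partition names are illustrative):
# pandas.DataFrame.update mutates in place and returns None, so the wrapper keeps
# the original partition untouched while still producing a shippable result:
#     safe_update = copy_df_for_func(pandas.DataFrame.update)
#     updated = safe_update(left_part, right_part)  # left_part is not modified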
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
k: v.to_pandas() if isinstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
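# Minimal usage sketch (query compiler instance `qc` is assumed): any operation
# without a distributed implementation can fall back through the method above, e.g.
#     qc.default_to_pandas(pandas.DataFrame.interpolate, method="linear")
# which materializes the frame in pandas, runs the call, and re-wraps the result.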
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_get_axis(0), _set_axis(0))
columns = property(_get_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy())
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
add = BinaryFunction.register(pandas.DataFrame.add)
combine = BinaryFunction.register(pandas.DataFrame.combine)
combine_first = BinaryFunction.register(pandas.DataFrame.combine_first)
eq = BinaryFunction.register(pandas.DataFrame.eq)
floordiv = BinaryFunction.register(pandas.DataFrame.floordiv)
ge = BinaryFunction.register(pandas.DataFrame.ge)
gt = BinaryFunction.register(pandas.DataFrame.gt)
le = BinaryFunction.register(pandas.DataFrame.le)
lt = BinaryFunction.register(pandas.DataFrame.lt)
mod = BinaryFunction.register(pandas.DataFrame.mod)
mul = BinaryFunction.register(pandas.DataFrame.mul)
ne = BinaryFunction.register(pandas.DataFrame.ne)
pow = BinaryFunction.register(pandas.DataFrame.pow)
rfloordiv = BinaryFunction.register(pandas.DataFrame.rfloordiv)
rmod = BinaryFunction.register(pandas.DataFrame.rmod)
rpow = BinaryFunction.register(pandas.DataFrame.rpow)
rsub = BinaryFunction.register(pandas.DataFrame.rsub)
rtruediv = BinaryFunction.register(pandas.DataFrame.rtruediv)
sub = BinaryFunction.register(pandas.DataFrame.sub)
truediv = BinaryFunction.register(pandas.DataFrame.truediv)
__and__ = BinaryFunction.register(pandas.DataFrame.__and__)
__or__ = BinaryFunction.register(pandas.DataFrame.__or__)
__rand__ = BinaryFunction.register(pandas.DataFrame.__rand__)
__ror__ = BinaryFunction.register(pandas.DataFrame.__ror__)
__rxor__ = BinaryFunction.register(pandas.DataFrame.__rxor__)
__xor__ = BinaryFunction.register(pandas.DataFrame.__xor__)
df_update = BinaryFunction.register(
copy_df_for_func(pandas.DataFrame.update), join_type="left"
)
series_update = BinaryFunction.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_series, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
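# Sketch of the two-pass scheme above: pass one writes `other`'s value into every
# cell where `cond` is False and leaves a literal True where `cond` is True; pass
# two then checks new_other.eq(True) per cell to decide whether to keep self's
# value or take the replacement, so the whole operation stays partition-aligned.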
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(left_on + right_on)
if is_reset_index
else new_self.sort_index(axis=0, level=left_on + right_on)
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
on = kwargs.get("on", None)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
if how in ["left", "inner"]:
right = right.to_pandas()
def map_func(left, right=right, kwargs=kwargs):
return pandas.DataFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
level = kwargs.get("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_pandas(pandas.DataFrame.reset_index, **kwargs)
if not drop:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.copy()
new_self.index = pandas.RangeIndex(len(new_self.index))
return new_self
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
PandasQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_map, is_monotonic_reduce, axis=0
)(self)
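# Sketch of the map-reduce check above: every partition reports
# [is_monotonic, first_value, last_value]; the reduce step requires all partitions
# to be monotonic AND the interleaved edge sequence (first_0, last_0, first_1, ...)
# to be monotonic too, which catches breaks that occur exactly at partition boundaries.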
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(pandas.DataFrame.count, pandas.DataFrame.sum)
max = MapReduceFunction.register(pandas.DataFrame.max, pandas.DataFrame.max)
min = MapReduceFunction.register(pandas.DataFrame.min, pandas.DataFrame.min)
sum = MapReduceFunction.register(pandas.DataFrame.sum, pandas.DataFrame.sum)
prod = MapReduceFunction.register(pandas.DataFrame.prod, pandas.DataFrame.prod)
any = MapReduceFunction.register(pandas.DataFrame.any, pandas.DataFrame.any)
all = MapReduceFunction.register(pandas.DataFrame.all, pandas.DataFrame.all)
memory_usage = MapReduceFunction.register(
pandas.DataFrame.memory_usage,
lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
axis=0,
)
mean = MapReduceFunction.register(
lambda df, **kwargs: df.apply(
lambda x: (x.sum(skipna=kwargs.get("skipna", True)), x.count()),
axis=kwargs.get("axis", 0),
result_type="reduce",
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
lambda df, **kwargs: df.apply(
lambda x: x.apply(lambda d: d[0]).sum(skipna=kwargs.get("skipna", True))
/ x.apply(lambda d: d[1]).sum(skipna=kwargs.get("skipna", True)),
axis=kwargs.get("axis", 0),
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
)
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except (ValueError):
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series(
[df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan]
)
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices should be sorted where values are equal.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
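# Sketch of the reduce step above: each partition contributes its own value_counts,
# so the reducer only re-groups by the value labels (the index) and sums the partial
# counts, e.g. {"a": 2, "b": 1} and {"a": 1} combine to {"a": 3, "b": 1} before the
# normalize/sort handling and the equal-count index sort run.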
# END MapReduce operations
# Reduction operations
idxmax = ReductionFunction.register(pandas.DataFrame.idxmax)
idxmin = ReductionFunction.register(pandas.DataFrame.idxmin)
median = ReductionFunction.register(pandas.DataFrame.median)
nunique = ReductionFunction.register(pandas.DataFrame.nunique)
skew = ReductionFunction.register(pandas.DataFrame.skew)
kurt = ReductionFunction.register(pandas.DataFrame.kurt)
sem = ReductionFunction.register(pandas.DataFrame.sem)
std = ReductionFunction.register(pandas.DataFrame.std)
var = ReductionFunction.register(pandas.DataFrame.var)
sum_min_count = ReductionFunction.register(pandas.DataFrame.sum)
prod_min_count = ReductionFunction.register(pandas.DataFrame.prod)
quantile_for_single_value = ReductionFunction.register(pandas.DataFrame.quantile)
mad = ReductionFunction.register(pandas.DataFrame.mad)
to_datetime = ReductionFunction.register(
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_func(
self, resample_args, func_name, new_columns=None, df_op=None, *args, **kwargs
):
def map_func(df, resample_args=resample_args):
if df_op is not None:
df = df_op(df)
resampled_val = df.resample(*resample_args)
op = getattr(pandas.core.resample.Resampler, func_name)
if callable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
val = op(resampled_val, *args, **kwargs)
except (ValueError):
resampled_val = df.copy().resample(*resample_args)
val = op(resampled_val, *args, **kwargs)
else:
val = getattr(resampled_val, func_name)
if isinstance(val, pandas.Series):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def resample_get_group(self, resample_args, name, obj):
return self._resample_func(resample_args, "get_group", name=name, obj=obj)
def resample_app_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"apply",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_app_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "apply", func=func, *args, **kwargs)
def resample_agg_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"aggregate",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_agg_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args, "aggregate", func=func, *args, **kwargs
)
def resample_transform(self, resample_args, arg, *args, **kwargs):
return self._resample_func(resample_args, "transform", arg=arg, *args, **kwargs)
def resample_pipe(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "pipe", func=func, *args, **kwargs)
def resample_ffill(self, resample_args, limit):
return self._resample_func(resample_args, "ffill", limit=limit)
def resample_backfill(self, resample_args, limit):
return self._resample_func(resample_args, "backfill", limit=limit)
def resample_bfill(self, resample_args, limit):
return self._resample_func(resample_args, "bfill", limit=limit)
def resample_pad(self, resample_args, limit):
return self._resample_func(resample_args, "pad", limit=limit)
def resample_nearest(self, resample_args, limit):
return self._resample_func(resample_args, "nearest", limit=limit)
def resample_fillna(self, resample_args, method, limit):
return self._resample_func(resample_args, "fillna", method=method, limit=limit)
def resample_asfreq(self, resample_args, fill_value):
return self._resample_func(resample_args, "asfreq", fill_value=fill_value)
def resample_interpolate(
self,
resample_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_func(
resample_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_count(self, resample_args):
return self._resample_func(resample_args, "count")
def resample_nunique(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "nunique", _method=_method, *args, **kwargs
)
def resample_first(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "first", _method=_method, *args, **kwargs
)
def resample_last(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "last", _method=_method, *args, **kwargs
)
def resample_max(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "max", _method=_method, *args, **kwargs
)
def resample_mean(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_median(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_min(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "min", _method=_method, *args, **kwargs
)
def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args,
"ohlc",
df_op=lambda df: df.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "prod", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_size(self, resample_args):
return self._resample_func(resample_args, "size", new_columns=["__reduced__"])
def resample_sem(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "sem", _method=_method, *args, **kwargs
)
def resample_std(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "std", ddof=ddof, *args, **kwargs)
def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "sum", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_var(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "var", ddof=ddof, *args, **kwargs)
def resample_quantile(self, resample_args, q, **kwargs):
return self._resample_func(resample_args, "quantile", q=q, **kwargs)
window_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
window_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda df, rolling_args: pandas.DataFrame(df.rolling(*rolling_args).count())
)
rolling_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
rolling_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_min = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).min(*args, **kwargs)
)
)
rolling_max = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_apply = FoldFunction.register(
lambda df, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(*rolling_args).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda df, rolling_args, quantile, interpolation, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
def unstack(self, level, fill_value):
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindex = True
else:
axis = 0
new_columns = None
need_reindex = False
def map_func(df):
return pandas.DataFrame(df.unstack(level=level, fill_value=fill_value))
def is_tree_like_or_1d(calc_index, valid_index):
if not isinstance(calc_index, pandas.MultiIndex):
return True
actual_len = 1
for lvl in calc_index.levels:
actual_len *= len(lvl)
return len(self.index) * len(self.columns) == actual_len * len(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_all_multi_list = False
if (
isinstance(self.index, pandas.MultiIndex)
and isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_all_multi_list = True
real_cols_bkp = self.columns
obj = self.copy()
obj.columns = np.arange(len(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._apply_full_axis(
axis, map_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def get_unique_level_values(index):
return [
index.get_level_values(lvl).unique()
for lvl in np.arange(index.nlevels)
]
new_index = (
get_unique_level_values(index)
if consider_index
else index
if isinstance(index, list)
else [index]
)
new_columns = (
get_unique_level_values(columns) if consider_columns else [columns]
)
return pandas.MultiIndex.from_product([*new_columns, *new_index])
if is_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sort_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindex:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = isinstance(self.index, pandas.MultiIndex)
is_recompute_columns = not is_recompute_index and isinstance(
self.columns, pandas.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if isinstance(self.columns, pandas.MultiIndex) or not isinstance(
self.index, pandas.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and isinstance(self.index, pandas.MultiIndex)
else self.index
)
index = pandas.MultiIndex.from_tuples(
list(index) * len(self.columns)
)
columns = self.columns.repeat(len(self.index))
index_levels = [
index.get_level_values(i) for i in range(index.nlevels)
]
new_index = pandas.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindex(0, new_index)
return result
def stack(self, level, dropna):
if not isinstance(self.columns, pandas.MultiIndex) or (
isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.columns.nlevels
):
new_columns = ["__reduced__"]
else:
new_columns = None
new_modin_frame = self._modin_frame._apply_full_axis(
1,
lambda df: pandas.DataFrame(df.stack(level=level, dropna=dropna)),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that apply a function to every partition.
abs = MapFunction.register(pandas.DataFrame.abs, dtypes="copy")
applymap = MapFunction.register(pandas.DataFrame.applymap)
conj = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.conj(df))
)
invert = MapFunction.register(pandas.DataFrame.__invert__)
isin = MapFunction.register(pandas.DataFrame.isin, dtypes=np.bool)
isna = MapFunction.register(pandas.DataFrame.isna, dtypes=np.bool)
negative = MapFunction.register(pandas.DataFrame.__neg__)
notna = MapFunction.register(pandas.DataFrame.notna, dtypes=np.bool)
round = MapFunction.register(pandas.DataFrame.round)
replace = MapFunction.register(pandas.DataFrame.replace)
series_view = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
df.squeeze(axis=1).view(*args, **kwargs)
)
)
to_numeric = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
pandas.to_numeric(df.squeeze(axis=1), *args, **kwargs)
)
)
def repeat(self, repeats):
def map_fn(df):
return pandas.DataFrame(df.squeeze(axis=1).repeat(repeats))
if isinstance(repeats, int) or (is_list_like(repeats) and len(repeats) == 1):
return MapFunction.register(map_fn, validate_index=True)(self)
else:
return self.__constructor__(self._modin_frame._apply_full_axis(0, map_fn))
# END Map partitions operations
# String map partitions operations
str_capitalize = MapFunction.register(_str_map("capitalize"), dtypes="copy")
str_center = MapFunction.register(_str_map("center"), dtypes="copy")
str_contains = MapFunction.register(_str_map("contains"), dtypes=np.bool)
str_count = MapFunction.register(_str_map("count"), dtypes=int)
str_endswith = MapFunction.register(_str_map("endswith"), dtypes=np.bool)
str_find = MapFunction.register(_str_map("find"), dtypes="copy")
str_findall = MapFunction.register(_str_map("findall"), dtypes="copy")
str_get = MapFunction.register(_str_map("get"), dtypes="copy")
str_index = MapFunction.register(_str_map("index"), dtypes="copy")
str_isalnum = MapFunction.register(_str_map("isalnum"), dtypes=np.bool)
str_isalpha = MapFunction.register(_str_map("isalpha"), dtypes=np.bool)
str_isdecimal = MapFunction.register(_str_map("isdecimal"), dtypes=np.bool)
str_isdigit = MapFunction.register(_str_map("isdigit"), dtypes=np.bool)
str_islower = MapFunction.register(_str_map("islower"), dtypes=np.bool)
str_isnumeric = MapFunction.register(_str_map("isnumeric"), dtypes=np.bool)
str_isspace = MapFunction.register(_str_map("isspace"), dtypes=np.bool)
str_istitle = MapFunction.register(_str_map("istitle"), dtypes=np.bool)
str_isupper = MapFunction.register(_str_map("isupper"), dtypes=np.bool)
str_join = MapFunction.register(_str_map("join"), dtypes="copy")
str_len = MapFunction.register(_str_map("len"), dtypes=int)
str_ljust = MapFunction.register(_str_map("ljust"), dtypes="copy")
str_lower = MapFunction.register(_str_map("lower"), dtypes="copy")
str_lstrip = MapFunction.register(_str_map("lstrip"), dtypes="copy")
str_match = MapFunction.register(_str_map("match"), dtypes="copy")
str_normalize = MapFunction.register(_str_map("normalize"), dtypes="copy")
str_pad = MapFunction.register(_str_map("pad"), dtypes="copy")
str_partition = MapFunction.register(_str_map("partition"), dtypes="copy")
str_repeat = MapFunction.register(_str_map("repeat"), dtypes="copy")
str_replace = MapFunction.register(_str_map("replace"), dtypes="copy")
str_rfind = MapFunction.register(_str_map("rfind"), dtypes="copy")
str_rindex = MapFunction.register(_str_map("rindex"), dtypes="copy")
str_rjust = MapFunction.register(_str_map("rjust"), dtypes="copy")
str_rpartition = MapFunction.register(_str_map("rpartition"), dtypes="copy")
str_rsplit = MapFunction.register(_str_map("rsplit"), dtypes="copy")
str_rstrip = MapFunction.register(_str_map("rstrip"), dtypes="copy")
str_slice = MapFunction.register(_str_map("slice"), dtypes="copy")
str_slice_replace = MapFunction.register(_str_map("slice_replace"), dtypes="copy")
str_split = MapFunction.register(_str_map("split"), dtypes="copy")
str_startswith = MapFunction.register(_str_map("startswith"), dtypes=np.bool)
str_strip = MapFunction.register(_str_map("strip"), dtypes="copy")
str_swapcase = MapFunction.register(_str_map("swapcase"), dtypes="copy")
str_title = MapFunction.register(_str_map("title"), dtypes="copy")
str_translate = MapFunction.register(_str_map("translate"), dtypes="copy")
str_upper = MapFunction.register(_str_map("upper"), dtypes="copy")
str_wrap = MapFunction.register(_str_map("wrap"), dtypes="copy")
str_zfill = MapFunction.register(_str_map("zfill"), dtypes="copy")
# END String map partitions operations
def unique(self):
"""Return unique values of Series object.
Returns
-------
ndarray
The unique values returned as a NumPy array.
"""
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda x: x.squeeze(axis=1).unique(),
new_columns=self.columns,
)
return self.__constructor__(new_modin_frame)
def searchsorted(self, **kwargs):
"""
Return a QueryCompiler holding the indices at which value/values should be
inserted to maintain the order of the passed Series.
Returns
-------
PandasQueryCompiler
"""
def map_func(part, *args, **kwargs):
elements_number = len(part.index)
assert elements_number > 0, "Wrong mapping behaviour of MapReduce"
# unify value type
value = kwargs.pop("value")
value = np.array([value]) if is_scalar(value) else value
if elements_number == 1:
part = part[part.columns[0]]
else:
part = part.squeeze()
part_index_start = part.index.start
part_index_stop = part.index.stop
result = part.searchsorted(value=value, *args, **kwargs)
processed_results = {}
value_number = 0
for value_result in result:
value_result += part_index_start
if value_result > part_index_start and value_result < part_index_stop:
processed_results[f"value{value_number}"] = {
"relative_location": "current_partition",
"index": value_result,
}
elif value_result <= part_index_start:
processed_results[f"value{value_number}"] = {
"relative_location": "previoius_partitions",
"index": part_index_start,
}
else:
processed_results[f"value{value_number}"] = {
"relative_location": "next_partitions",
"index": part_index_stop,
}
value_number += 1
return pandas.DataFrame(processed_results)
def reduce_func(map_results, *args, **kwargs):
def get_value_index(value_result):
value_result_grouped = value_result.groupby(level=0)
rel_location = value_result_grouped.get_group("relative_location")
ind = value_result_grouped.get_group("index")
# executes if result is inside of the mapped part
if "current_partition" in rel_location.values:
assert (
rel_location[rel_location == "current_partition"].count() == 1
), "Each value should have single result"
return ind[rel_location.values == "current_partition"]
# executes if result is between mapped parts
elif rel_location.nunique(dropna=False) > 1:
return ind[rel_location.values == "previous_partitions"][0]
# executes if result is outside of the mapped part
else:
if "next_partitions" in rel_location.values:
return ind[-1]
else:
return ind[0]
map_results_parsed = map_results.apply(
lambda ser: get_value_index(ser)
).squeeze()
if isinstance(map_results_parsed, pandas.Series):
map_results_parsed = map_results_parsed.to_list()
return pandas.Series(map_results_parsed)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# Dt map partitions operations
dt_date = MapFunction.register(_dt_prop_map("date"))
dt_time = MapFunction.register(_dt_prop_map("time"))
dt_timetz = MapFunction.register(_dt_prop_map("timetz"))
dt_year = MapFunction.register(_dt_prop_map("year"))
dt_month = MapFunction.register(_dt_prop_map("month"))
dt_day = MapFunction.register(_dt_prop_map("day"))
dt_hour = MapFunction.register(_dt_prop_map("hour"))
dt_minute = MapFunction.register(_dt_prop_map("minute"))
dt_second = MapFunction.register(_dt_prop_map("second"))
dt_microsecond = MapFunction.register(_dt_prop_map("microsecond"))
dt_nanosecond = MapFunction.register(_dt_prop_map("nanosecond"))
dt_week = MapFunction.register(_dt_prop_map("week"))
dt_weekofyear = MapFunction.register(_dt_prop_map("weekofyear"))
dt_dayofweek = MapFunction.register(_dt_prop_map("dayofweek"))
dt_weekday = MapFunction.register(_dt_prop_map("weekday"))
dt_dayofyear = MapFunction.register(_dt_prop_map("dayofyear"))
dt_quarter = MapFunction.register(_dt_prop_map("quarter"))
dt_is_month_start = MapFunction.register(_dt_prop_map("is_month_start"))
dt_is_month_end = MapFunction.register(_dt_prop_map("is_month_end"))
dt_is_quarter_start = MapFunction.register(_dt_prop_map("is_quarter_start"))
dt_is_quarter_end = MapFunction.register(_dt_prop_map("is_quarter_end"))
dt_is_year_start = MapFunction.register(_dt_prop_map("is_year_start"))
dt_is_year_end = MapFunction.register(_dt_prop_map("is_year_end"))
dt_is_leap_year = MapFunction.register(_dt_prop_map("is_leap_year"))
dt_daysinmonth = MapFunction.register(_dt_prop_map("daysinmonth"))
dt_days_in_month = MapFunction.register(_dt_prop_map("days_in_month"))
dt_tz = MapReduceFunction.register(
_dt_prop_map("tz"), lambda df: pandas.DataFrame(df.iloc[0]), axis=0
)
dt_freq = MapReduceFunction.register(
_dt_prop_map("freq"), lambda df: pandas.DataFrame(df.iloc[0]), axis=0
)
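# tz and freq are uniform over the whole datetime column, so the reduce step just
# keeps the value reported by the first partition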
dt_to_period = MapFunction.register(_dt_func_map("to_period"))
dt_to_pydatetime = MapFunction.register(_dt_func_map("to_pydatetime"))
dt_tz_localize = MapFunction.register(_dt_func_map("tz_localize"))
dt_tz_convert = MapFunction.register(_dt_func_map("tz_convert"))
dt_normalize = MapFunction.register(_dt_func_map("normalize"))
dt_strftime = MapFunction.register(_dt_func_map("strftime"))
dt_round = MapFunction.register(_dt_func_map("round"))
dt_floor = MapFunction.register(_dt_func_map("floor"))
dt_ceil = MapFunction.register(_dt_func_map("ceil"))
dt_month_name = MapFunction.register(_dt_func_map("month_name"))
dt_day_name = MapFunction.register(_dt_func_map("day_name"))
dt_to_pytimedelta = MapFunction.register(_dt_func_map("to_pytimedelta"))
dt_total_seconds = MapFunction.register(_dt_func_map("total_seconds"))
dt_seconds = MapFunction.register(_dt_prop_map("seconds"))
dt_days = MapFunction.register(_dt_prop_map("days"))
dt_microseconds = MapFunction.register(_dt_prop_map("microseconds"))
dt_nanoseconds = MapFunction.register(_dt_prop_map("nanoseconds"))
dt_components = MapFunction.register(
_dt_prop_map("components"), validate_columns=True
)
dt_qyear = MapFunction.register(_dt_prop_map("qyear"))
dt_start_time = MapFunction.register(_dt_prop_map("start_time"))
dt_end_time = MapFunction.register(_dt_prop_map("end_time"))
dt_to_timestamp = MapFunction.register(_dt_func_map("to_timestamp"))
# END Dt map partitions operations
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
return self.__constructor__(self._modin_frame.astype(col_dtypes))
# Column/Row partitions reduce operations
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
def first_valid_index_builder(df):
return df.set_axis(
pandas.RangeIndex(len(df.index)), axis="index", inplace=False
).apply(lambda df: df.first_valid_index())
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, first_valid_index_builder)
)
.min(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
return df.set_axis(
pandas.RangeIndex(len(df.index)), axis="index", inplace=False
).apply(lambda df: df.last_valid_index())
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, last_valid_index_builder)
)
.max(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
empty_df = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
)
def describe_builder(df, internal_indices=[]):
return df.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
0,
describe_builder,
empty_df.columns,
new_index=empty_df.index,
new_columns=empty_df.columns,
)
)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
cummax = FoldFunction.register(pandas.DataFrame.cummax)
cummin = FoldFunction.register(pandas.DataFrame.cummin)
cumsum = FoldFunction.register(pandas.DataFrame.cumsum)
cumprod = FoldFunction.register(pandas.DataFrame.cumprod)
diff = FoldFunction.register(pandas.DataFrame.diff)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
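# list-like bounds must be aligned element-wise along the axis, so they need a
# full-axis fold; scalar bounds can be applied to each partition independently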
if is_list_like(lower) or is_list_like(upper):
new_modin_frame = self._modin_frame._fold(
axis, lambda df: df.clip(**kwargs)
)
else:
new_modin_frame = self._modin_frame._map(lambda df: df.clip(**kwargs))
return self.__constructor__(new_modin_frame)
def dot(self, other, squeeze_self=None, squeeze_other=None):
"""
Computes the matrix multiplication of self and other.
Parameters
----------
other : PandasQueryCompiler or NumPy array
The other query compiler or NumPy array to matrix multiply with self.
squeeze_self : boolean
The flag to squeeze self.
squeeze_other : boolean
The flag to squeeze other (this flag is applied if other is query compiler).
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the matrix multiply.
"""
if isinstance(other, PandasQueryCompiler):
other = (
other.to_pandas().squeeze(axis=1)
if squeeze_other
else other.to_pandas()
)
def map_func(df, other=other, squeeze_self=squeeze_self):
result = df.squeeze(axis=1).dot(other) if squeeze_self else df.dot(other)
if is_list_like(result):
return pandas.DataFrame(result)
else:
return pandas.DataFrame([result])
num_cols = other.shape[1] if len(other.shape) > 1 else 1
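# "__reduced__" is the internal placeholder label used when the result collapses
# to a single column (a Series-like result)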
if len(self.columns) == 1:
new_index = (
["__reduced__"]
if (len(self.index) == 1 or squeeze_self) and num_cols == 1
else None
)
new_columns = ["__reduced__"] if squeeze_self and num_cols == 1 else None
axis = 0
else:
new_index = self.index
new_columns = ["__reduced__"] if num_cols == 1 else None
axis = 1
new_modin_frame = self._modin_frame._apply_full_axis(
axis, map_func, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def _nsort(self, n, columns=None, keep="first", sort_type="nsmallest"):
def map_func(df, n=n, keep=keep, columns=columns):
if columns is None:
return pandas.DataFrame(
getattr(pandas.Series, sort_type)(
df.squeeze(axis=1), n=n, keep=keep
)
)
return getattr(pandas.DataFrame, sort_type)(
df, n=n, columns=columns, keep=keep
)
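# columns is None only for Series-like (single column) input; the result then
# keeps the single "__reduced__" column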
if columns is None:
new_columns = ["__reduced__"]
else:
new_columns = self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def nsmallest(self, *args, **kwargs):
return self._nsort(sort_type="nsmallest", *args, **kwargs)
def nlargest(self, *args, **kwargs):
return self._nsort(sort_type="nlargest", *args, **kwargs)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
empty_eval = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.eval(expr, inplace=False, **kwargs)
)
if isinstance(empty_eval, pandas.Series):
new_columns = (
[empty_eval.name] if empty_eval.name is not None else ["__reduced__"]
)
else:
new_columns = empty_eval.columns
new_modin_frame = self._modin_frame._apply_full_axis(
1,
lambda df: pandas.DataFrame(df.eval(expr, inplace=False, **kwargs)),
new_index=self.index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df):
result = pandas.DataFrame(df.mode(**kwargs))
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if axis == 0 and len(df) != len(result):
# Pad rows
result = result.reindex(index=pandas.RangeIndex(len(df.index)))
elif axis == 1 and len(df.columns) != len(result.columns):
# Pad columns
result = result.reindex(columns=pandas.RangeIndex(len(df.columns)))
return pandas.DataFrame(result)
if axis == 0:
new_index = pandas.RangeIndex(len(self.index))
new_columns = self.columns
else:
new_index = self.index
new_columns = pandas.RangeIndex(len(self.columns))
new_modin_frame = self._modin_frame._apply_full_axis(
axis, mode_builder, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame).dropna(axis=axis, how="all")
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
method = kwargs.get("method", None)
limit = kwargs.get("limit", None)
full_axis = method is not None or limit is not None
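# filling by method or with a limit needs neighboring values along the axis, so it
# runs as a full-axis fold; plain value fills can be mapped per partition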
if isinstance(value, dict):
kwargs.pop("value")
def fillna(df):
func_dict = {c: value[c] for c in value if c in df.columns}
return df.fillna(value=func_dict, **kwargs)
else:
def fillna(df):
return df.fillna(**kwargs)
if full_axis:
new_modin_frame = self._modin_frame._fold(axis, fillna)
else:
new_modin_frame = self._modin_frame._map(fillna)
return self.__constructor__(new_modin_frame)
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
QueryCompiler containing quantiles of original QueryCompiler along an axis.
"""
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self._modin_frame._numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis == 1:
query_compiler = self.getitem_column_array(new_columns)
new_columns = self.index
else:
query_compiler = self
def quantile_builder(df, **kwargs):
result = df.quantile(**kwargs)
return result.T if kwargs.get("axis", 0) == 1 else result
# This took a long time to debug, so here is the rundown of why this is needed.
# Previously, we were operating on select indices, but that was broken. We were
# not correctly setting the columns/index. Because of how we compute `to_pandas`
# and because of the static nature of the index for `axis=1` it is easier to
# just handle this as the transpose (see `quantile_builder` above for the
# transpose within the partition) than it is to completely rework other
# internal methods. Basically we are returning the transpose of the object for
# correctness and cleanliness of the code.
if axis == 1:
q_index = new_columns
new_columns = pandas.Float64Index(q)
else:
q_index = pandas.Float64Index(q)
new_modin_frame = query_compiler._modin_frame._apply_full_axis(
axis,
lambda df: quantile_builder(df, **kwargs),
new_index=q_index,
new_columns=new_columns,
dtypes=np.float64,
)
result = self.__constructor__(new_modin_frame)
return result.transpose() if axis == 1 else result
def query(self, expr, **kwargs):
"""Query columns of the QueryCompiler with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
QueryCompiler containing the rows where the boolean expression is satisfied.
"""
def query_builder(df, **kwargs):
return df.query(expr, inplace=False, **kwargs)
return self.__constructor__(
self._modin_frame.filter_full_axis(1, query_builder)
)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
QueryCompiler containing the ranks of the values along an axis.
"""
axis = kwargs.get("axis", 0)
numeric_only = True if axis else kwargs.get("numeric_only", False)
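# ranking across rows (axis=1) is only defined for numeric data, so numeric_only is
# forced on; with numeric_only the surviving columns are unknown up front, hence None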
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.rank(**kwargs),
new_index=self.index,
new_columns=self.columns if not numeric_only else None,
dtypes=np.float64,
)
return self.__constructor__(new_modin_frame)
def sort_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
QueryCompiler containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
level = kwargs.pop("level", None)
sort_remaining = kwargs.pop("sort_remaining", True)
kwargs["inplace"] = False
if level is not None or self.has_multiindex(axis=axis):
return self.default_to_pandas(
pandas.DataFrame.sort_index,
axis=axis,
level=level,
sort_remaining=sort_remaining,
**kwargs,
)
# sort_index can have ascending be None and behaves as if it is False.
# sort_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.sort_index(
axis=axis, level=level, sort_remaining=sort_remaining, **kwargs
),
new_index,
new_columns,
dtypes="copy" if axis == 0 else None,
)
return self.__constructor__(new_modin_frame)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
):
ErrorMessage.missmatch_with_pandas(
operation="melt", message="Order of rows could be different from pandas"
)
if var_name is None:
var_name = "variable"
def _convert_to_list(x):
if is_list_like(x):
x = [*x]
elif x is not None:
x = [x]
else:
x = []
return x
id_vars, value_vars = map(_convert_to_list, [id_vars, value_vars])
if len(value_vars) == 0:
value_vars = self.columns.drop(id_vars)
if len(id_vars) != 0:
to_broadcast = self.getitem_column_array(id_vars)._modin_frame
else:
to_broadcast = None
def applyier(df, internal_indices, other=[], internal_other_indices=[]):
if len(other):
other = pandas.concat(other, axis=1)
columns_to_add = other.columns.difference(df.columns)
df = pandas.concat([df, other[columns_to_add]], axis=1)
return df.melt(
id_vars=id_vars,
value_vars=df.columns[internal_indices],
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
# we are not able to calculate correct indices here, so we use `dummy_index`
inconsistent_frame = self._modin_frame.broadcast_apply_select_indices(
axis=0,
apply_indices=value_vars,
func=applyier,
other=to_broadcast,
new_index=["dummy_index"] * len(id_vars),
new_columns=["dummy_index"] * len(id_vars),
)
# after applying `melt` for selected indices we will get partitions like this:
# id_vars vars value | id_vars vars value
# 0 foo col3 1 | 0 foo col5 a so stacking it into
# 1 fiz col3 2 | 1 fiz col5 b `new_parts` to get
# 2 bar col3 3 | 2 bar col5 c correct answer
# 3 zoo col3 4 | 3 zoo col5 d
new_parts = np.array(
[np.array([x]) for x in np.concatenate(inconsistent_frame._partitions.T)]
)
new_index = pandas.RangeIndex(len(self.index) * len(value_vars))
new_modin_frame = self._modin_frame.__constructor__(
new_parts,
index=new_index,
columns=id_vars + [var_name, value_name],
)
result = self.__constructor__(new_modin_frame)
# this assignment needs to propagate correct indices into partitions
result.index = new_index
return result
# END Map across rows/columns
# __getitem__ methods
def getitem_array(self, key):
"""
Get column or row data specified by key.
Parameters
----------
key : PandasQueryCompiler, numpy.ndarray, pandas.Index or list
Target numeric indices or labels by which to retrieve data.
Returns
-------
PandasQueryCompiler
A new Query Compiler.
"""
# TODO: don't convert to pandas for array indexing
if isinstance(key, type(self)):
key = key.to_pandas().squeeze(axis=1)
if is_bool_indexer(key):
if isinstance(key, pandas.Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
PendingDeprecationWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
"Item wrong length {} instead of {}.".format(
len(key), len(self.index)
)
)
key = check_bool_indexer(self.index, key)
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 16:15:24 2017
Sponsors Club messaging functions
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import re
import math
import textwrap
from tkinter import filedialog
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pkg.SC_signup_functions import findcards
from openpyxl import load_workbook
import pkg.SC_config as cnf
def emailparent_tk(teams, season, year):
''' Interface for non-billing email messages to parents (non-generic)
Message types include:
recruit - specific inquiry about player from last year not yet signed up; needs signupfile w/ recruits tab
assign - notify of team assignment, optional recruit for short team, CYC card notify; teams/cards/mastersignups
missinguni - ask about missing uniforms; missingunifile
unireturn - generic instructions for uniform return; mastersignups w/ unis issued
askforcards - check for CYC card on file and ask
other -- Generic single all team+coaches message (can have $SCHOOL, $GRADERANGE,$COACHINFO, $SPORT, $PLAYERLIST)
8/9/17 works for team assignments
TODO test recruit, missing unis, unireturn
args:
teams - df w/ active teams
season -'Winter', 'Fall' or 'Spring'
year - starting sport year i.e. 2019 for 2019-20 school year
'''
#%%
# first print out existing info in various lines
root = tk.Tk()
root.title('Send e-mail to parents')
messageframe=tk.LabelFrame(root, text='Message options')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
recruitbool=tk.BooleanVar() # optional recruiting for short teams
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
transmessfile=tk.StringVar() # text of e-mail message for transfers
extravar=tk.StringVar() # use depends on message type... normally filename
extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
extraname.set('Extra_file_name.txt') # default starting choice
choice=tk.StringVar() # test or send -mail
def chooseFile(txtmess, ftypes):
''' tkinter file chooser (passes message string for window and expected
file types as tuple e.g. ('TXT','*.txt')
'''
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = txtmess, filetypes=[ ftypes] )
root.destroy() # closes pop up window
return full_path
def choose_message():
# choose existing message (.txt file)
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = 'Choose message file', filetypes=[ ('TXT','*.txt')] )
root.destroy() # closes pop up window
return full_path
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Assignopts():
''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
extraname.set('n/a')
messfile.set('parent_team_assignment.txt')
transmessfile.set('parent_team_transfer.txt')
emailtitle.set('Fall $SPORT for $FIRST')
def Recruitopts():
''' Display relevant choices for specific player recruiting'''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
messfile.set('player_recruiting.txt')
transmessfile.set('n/a')
extraname.set('n/a')
emailtitle.set('Cabrini-Soulard sports for $FIRST this fall?')
def Missingopts():
''' Display relevant choices for ask parent for missing uniforms '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('finish_me.txt')
transmessfile.set('n/a')
extraname.set('Missing uni file name')
extravar.set('missing_uni.csv')
# TODO look up most recent uni file?
emailtitle.set("Please return $FIRST's $SPORT uniform!")
def Schedopts():
''' Display relevant choices for sending schedules (game and practice) to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('parent_game_schedule.txt')
transmessfile.set('n/a')
extraname.set('Game schedule file')
extravar.set('Cabrini_2017_schedule.csv')
emailtitle.set("Game schedule for Cabrini $GRADERANGE $GENDER $SPORT")
def Cardopts():
''' Display relevant choices for asking parent for missing CYC cards '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.DISABLED)
messfile.set('CYCcard_needed.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("CYC card needed for $FIRST")
def Otheropts():
''' Display relevant choices for other generic message to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
def Allopts():
''' Display relevant choices for generic message to all sports parents '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
# E-mail title and message file name
rownum=0
tk.Label(messageframe, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(messageframe, textvariable=emailtitle)
titleentry.config(width=50)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(messageframe, textvariable=messfile)
messentry.config(width=50)
messentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='Transfer messagefile').grid(row=rownum, column=0)
transmessentry=tk.Entry(messageframe, textvariable=transmessfile)
transmessentry.config(width=50)
transmessentry.grid(row=rownum, column=1)
rownum+=1
# Radiobuttons to select the parent message type
tk.Radiobutton(messageframe, text='Team assignment', value='Assign', variable = mtype, command=Assignopts).grid(row=rownum, column=0)
tk.Radiobutton(messageframe, text='Recruit missing', value='Recruit', variable = mtype, command=Recruitopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Missing uni', value='Missing', variable = mtype, command=Missingopts).grid(row=rownum, column=2)
tk.Radiobutton(messageframe, text='Send schedule', value='Schedule', variable = mtype, command=Schedopts).grid(row=rownum, column=3)
rownum+=1
tk.Radiobutton(messageframe, text='Ask for cards', value='Cards', variable = mtype, command=Cardopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Other team message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='All sport parents', value='All', variable = mtype, command=Allopts).grid(row=rownum, column=2)
rownum+=1
tk.Label(messageframe, text=extraname.get()).grid(row=rownum, column=0)
extraentry=tk.Entry(messageframe, textvariable=extravar)
extraentry.grid(row=rownum, column=1)
# Extra file chooser button
# file type extension for the chooser comes from extraname (e.g. 'txt' or 'csv')
try:
ft = extraname.get().split('.')[-1]
ftypes =("%s" %ft.upper(), "*.%s" %ft)
except:
ftypes =("CSV" , "*.*") # default to all files
# Extra file chooser (wrap in a lambda so the dialog opens on click and fills extravar)
d=tk.Button(messageframe, text='Choose file', command=lambda: extravar.set(chooseFile('Choose extra file', ftypes)))
d.grid(row=rownum, column=2)
recruitcheck=tk.Checkbutton(messageframe, variable=recruitbool, text='Recruit more players for short teams?')
recruitcheck.grid(row=rownum, column=3) # can't do immediate grid or nonetype is returned
rownum+=1
messageframe.grid(row=0, column=0)
# Specific team selector section using checkboxes
teamframe=tk.LabelFrame(root, text='Team selector')
teamdict=shortnamedict(teams)
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
thisrow=i%5+1+rownum # five teams per column layout
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(teamframe, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(teamframe, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(teamframe, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
teamframe.grid(row=1, column=0)
choiceframe=tk.LabelFrame(root)
d=tk.Button(choiceframe, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(choiceframe, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(choiceframe, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(choiceframe, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
choiceframe.grid(row=2, column=0)
root.mainloop()
#%%
mychoice=choice.get()
if mychoice!='abort':
kwargs={}
if mychoice=='KCtest':
# this is a true send test but only to me
kwargs.update({'KCtest':True})
mychoice='send'
kwargs.update({'choice':mychoice}) # test or send
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
# drop duplicates in case of co-ed team (m and f entries)
teams=teams.drop_duplicates(['Team','Sport'])
# Now deal with the different types of messages
#%%
if mtype.get()=='Schedule':
# Send practice and game schedules
try:
sched=pd.read_csv(extravar.get())
except:
print('Problem opening schedule and other required files for sending game schedules')
fname=filedialog.askopenfilename(title='Select schedule file.')
sched=pd.read_csv(fname)
# fields=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Fields')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
# open and send master CYC schedule
sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Recruit':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
except:
print('Problem loading family contacts')
try: # Recruits stored in CSV
Recruits=pd.read_csv(cnf._OUTPUT_DIR+'\\%s%s_recruits.csv' %(season, year))
print('Loaded possible recruits from csv file')
except:
fname=filedialog.askopenfilename(title='Select recruits file.')
if fname.endswith('.csv'): # final move is query for file
Recruits=pd.read_csv(fname)
else:
print('Recruits file needed in csv format.')
return
emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs)
if mtype.get()=='Assign':
# Notify parents needs teams, mastersignups, famcontacts
if recruitbool.get():
kwargs.update({'recruit':True})
try:
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv', encoding='cp437')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
tranmessagefile='messages\\'+transmessfile.get()
with open(tranmessagefile, 'r') as file:
blanktransmess=file.read()
except:
print('Problem loading mastersignups, famcontacts')
return
notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs)
if mtype.get()=='Unis':
try:
missing=pd.read_csv(messfile.get(), encoding='cp437')
oldteams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
kwargs.update({'oldteams':oldteams,'missing':missing})
except:
print('Problem loading missingunis, oldteams')
return
# TODO Finish ask for missing uniforms script
askforunis(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Cards':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading famcontacts, mastersignups, or blank message')
return
# TODO Finish ask for missing uniforms script
askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Other':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading mastersignups, coaches, ')
return
# TODO Finish ask for missing uniforms script
sendteammessage(teams, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs)
if mtype.get()=='All':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
with open(messagefile, 'r') as file:
blankmess=file.read()
except:
print('Problem loading mastersignups, coaches, ')
return
# TODO Finish ask for missing uniforms script
sendallmessage(season, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs)
return
''' TESTING of notifyfamilies
[sport, team, graderange, coachinfo, playerlist] =cabteamlist[6] i=6
index=thisteam.index[0]
row=thisteam.loc[index]
'''
def readMessage():
''' Choose text file from messages as template for email or log message (w/ find/replace
of team and individual info)
args: none
returns: string with contents of chosen TXT file
'''
def pickMessage():
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
full_path = tk.filedialog.askopenfilename(initialdir = cnf._INPUT_DIR+'\\messages\\', title = 'Choose blank email template',
filetypes=[ ('txt','*.txt')] )
root.destroy() # closes pop up window
return full_path
full_path = pickMessage()
with open(full_path,'r') as file:
blankmess = file.read()
return blankmess
def askforunis():
# TODO finish me
pass
def askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs):
''' Notifying players that need cards and ask for them via custom e-mail (one per player)
kwargs:
choice - 'send' or 'test'
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('parent_email_log.txt','w', encoding='utf-8')
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
# drop non-CYC K and 1 level teams
teams=teams[teams['Grade']>=2]
# Make list of sport/team/school/graderange
teamlist=[]
for index, row in teams.iterrows():
# get school
if '#' not in teams.loc[index]['Team']:
school='Cabrini'
else:
school=teams.loc[index]['Team'].split('#')[0]
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([teams.loc[index]['Sport'], teams.loc[index]['Team'], school,
teams.loc[index]['Graderange']])
# dict. with each team and its players
cards=findcards() # find all player cards
if not cards: # terminate if no cards are found (path error?)
print("Error opening CYC card image database")
return
# Drop all player nums found in cards
cardslist=list(cards.keys())
cardslist=[i for i in cardslist if '-' not in i]
cardslist=[int(i) for i in cardslist]
# Only keep signups without cards
Mastersignups=Mastersignups[~Mastersignups['Plakey'].isin(cardslist)]
CYCSUs=pd.DataFrame()
for i, [sport, team, school, graderange] in enumerate(teamlist):
CYCSUs=CYCSUs.append(Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)])
# only one notice needed per player
CYCSUs=CYCSUs.drop_duplicates('Plakey')
CYCSUs=pd.merge(CYCSUs, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
for index, row in CYCSUs.iterrows():
# Replace first name in e-mail title (default e-mail title is fall $SPORT for $FIRST)
thistitle=emailtitle.replace('$FIRST', row.First)
thistitle=thistitle.replace('$LAST', row.Last)
# custom message for individual player on this team
thismess=blankmess.replace('$FIRST', row.First)
thismess=thismess.replace('$LAST', row.Last)
recipients=getemailadds(row)
# Create custom email message (can have multiple sports in df)
if choice=='send':
# add From/To/Subject to actual e-mail
thisemail='From: Cabrini Sponsors Club <<EMAIL>>\nTo: '
thisemail+=', '.join(recipients)+'\nSubject: '+thistitle+'\n'
thisemail+=thismess
thisemail=thisemail.encode('utf-8')
for i,addr in enumerate(recipients): # Send message to each valid recipient in list
try:
smtpObj.sendmail('<EMAIL>', addr, thisemail)
print ('Message sent to ', addr)
except:
print('Message to ', addr, ' failed.')
if not recipients:
print('No email address for ', row.First, row.Last)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thismess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
return
def sendallmessage(season, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs):
''' Top level messaging function for notifying families of team assignment/ CYC card
+ optional short-team-player-recruiting
via custom e-mail; one per player
currently not including SMS
kwargs:
choice - 'send' or 'test'
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('allparent_email_log.txt','w', encoding='utf-8')
# Get all email addresses from recent parents (default last 3 seasons)
recipients=makeemaillist(Mastersignups, famcontact, season, year, SMS=False)
# add all coach emails
coachemails=np.ndarray.tolist(coaches.Email.unique())
coachemails=[i for i in coachemails if isinstance(i, str) and '@' in i] # drop NaN/non-string entries
recipients.extend(coachemails)
recipients=set(recipients)
recipients=list(recipients) # unique only
# Create custom email message (can have multiple sports in df)
if choice=='send':
if 'KCtest' in kwargs: # internal only send test
recipients=['<EMAIL>','<EMAIL>']
msg=MIMEText(blankmess,'plain')
# msg = MIMEMultipart('alternative') # message container
msg['Subject'] = emailtitle
msg['From'] = '<NAME> <<EMAIL>>'
msg['To'] = '<NAME> <<EMAIL>>'
msg['Bcc']=','.join(recipients) # single e-mail or list
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
else: # Testing mode
tempstr='Test message to: '+', '.join(recipients)
logfile.write(tempstr+'\n')
logfile.write(blankmess)
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# smtpObj.quit() # close SMTP connection
return
def sendteammessage(teams, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs):
''' Top level messaging function for notifying families of team assignment/ CYC card
+ optional short-team-player-recruiting
via custom e-mail; one per player
currently not including SMS
kwargs:
choice - 'send' or 'test'
recruit - T or F -- add recruiting statement for short teams
mformat - not really yet used ... just sending as text not html
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('team_email_log.txt','w', encoding='utf-8')
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
# drop extra co-ed K or other entries
teams=teams.drop_duplicates(['Team'])
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
try:
coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
except:
coachinfo=''
else:
school=myteams.loc[index]['Team'].split('#')[0]
coachinfo=''
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([row.Sport, row.Team, school,
gradetostring(row.Graderange), coachinfo, row.Playerlist])
# Separate notification for each signup is OK
for i, [sport, team, school, graderange, coach, playerlist] in enumerate(teamlist):
thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
# Cabrini team base message
thisteammess=blankmess
thistitle=emailtitle
# Make team-specific replacements in message text and e-mail title
for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$COACH', '$PLAYERLIST']):
thisteammess=thisteammess.replace(col, textwrap.fill(teamlist[i][j], width=100))
thistitle=thistitle.replace(col, teamlist[i][j])
# get coach emails
recipients=getcoachemails(team, teams, coaches, **{'asst':True})
# Now get all unique team email addresses (single message to coach and team)
recipients=getallteamemails(thisteam, recipients)
# Create custom email message (can have multiple sports in df)
if choice=='send':
msg=MIMEText(thisteammess,'plain')
# msg = MIMEMultipart('alternative') # message container
msg['Subject'] = emailtitle
msg['From'] = '<NAME> <<EMAIL>>'
# part2=MIMEText(thismess_html,'alternate')
msg['To']=','.join(recipients) # single e-mail or list
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
if not recipients:
print('No email addresses for team', team)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thisteammess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
return
def makeemaillist(Mastersignups, famcontact, thisseason, thisyear, SMS=False):
'''Return e-mail list for active families (mainly for e-mail contact list);
a family is active if it has a player signed up this year, or last year (below grade 8)
in a sport whose season is the current one or later
'''
# TODO generalize to n prior sports seasons
thisyearSU=Mastersignups[Mastersignups['Year']==thisyear] # take all form
lastyearSU=Mastersignups[Mastersignups['Year']==(thisyear-1)]
lastyearSU=lastyearSU[lastyearSU['Grade']!=8]
seasonlist=['Fall', 'Winter', 'Spring']
pos=seasonlist.index(thisseason)
activeseasons=seasonlist[pos:]
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
activesports=[]
for i, season in enumerate(activeseasons):
sportlist=sportsdict.get(season)
activesports.extend(sportlist)
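# e.g. thisseason='Winter' gives activeseasons=['Winter','Spring'] and
# activesports=['Basketball','Track','Softball','Baseball','T-ball']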
lastyearSU=lastyearSU[lastyearSU['Sport'].isin(activesports)] # last year's signups incl.
allSU=pd.concat([thisyearSU,lastyearSU],ignore_index=True)
activefams=allSU.Famkey.unique()
emaillist=[]
match=famcontact[famcontact['Famkey'].isin(activefams)]
emails=match.Email1.unique()
emails=np.ndarray.tolist(emails)
emaillist.extend(emails)
emails=match.Email2.unique()
emails=np.ndarray.tolist(emails)
emaillist.extend(emails)
emails=match.Email3.unique()
emails=np.ndarray.tolist(emails)
emaillist.extend(emails)
emaillist=set(emaillist) # eliminate duplicates
emaillist=list(emaillist)
emaillist=[x for x in emaillist if str(x) != 'nan'] # remove nan
emaillist=[x for x in emaillist if str(x) != 'none'] # remove nan
if not SMS: # Drop SMS
emaillist=[x for x in emaillist if not str(x).startswith('314')]
emaillist=[x for x in emaillist if not str(x).startswith('1314')]
return emaillist
def getcabsch(sched, teams, coaches, fields, **kwargs):
''' Return the Cabrini subset of the master schedule (selected teams only)
manual save... can then feed csv to sendschedule
kwargs:
sport -- Soccer, VB or whatever
div--- division 5G
school - Cabrini
#TESTING sched=fullsched.copy()
'''
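# Typical use (illustrative only; file names here are assumptions, not fixed by this module):
# fullsched = pd.read_csv('CYC_full_schedule.csv')
# cabsched = getcabsch(fullsched, teams, coaches, fields, school='Cabrini', sport='Soccer')
# cabsched.to_csv('Cabrini_2017_schedule.csv', index=False)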
if 'school' in kwargs:
if kwargs.get('school','')=='Cabrini':
# drop transfer teams w/ #
teams=teams[~teams['Team'].str.contains('#')]
if 'sport' in kwargs:
sport=kwargs.get('sport','')
teams=teams[teams['Sport']==sport]
if 'div' in kwargs:
div=kwargs.get('div','')
grade=int(div[0])
if div[1].upper()=='G':
gender='f'
elif div[1].upper()=='B':
gender='m'
teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
# perform any team filtering
sched=sched.rename(columns={'Start':'Time','Venue':'Location','Sched Name':'Division',
'Visitor':'Away'})
teamdict=findschteams(sched, teams, coaches)
cabsched=pd.DataFrame()
for key, [div, schname] in teamdict.items():
match=sched[(sched['Division'].str.startswith(div)) & ((sched['Home'].str.contains(schname)) | (sched['Away'].str.contains(schname)))]
if 'Cabrini' not in schname:
newname=schname.split('/')[0]+'-Cabrini'
match['Home']=match['Home'].str.replace(schname,newname)
match['Away']=match['Away'].str.replace(schname,newname)
# add team column via assign
match=match.assign(Team=key)
# Why isn't team col being copied?
cabsched=cabsched.append(match, ignore_index=True)
print(len(match),' games for team', str(schname))
cabsched['Home']=cabsched['Home'].str.replace('St Frances','')
cabsched['Away']=cabsched['Away'].str.replace('St Frances','')
cabsched=cabsched.sort_values(['Division','Date','Time'])
# now sort
myCols=['Date','Time','Day','Location','Division','Home','Away','Team']
# add col if missing from CYC schedule
for miss in [i for i in myCols if i not in cabsched.columns]:
print(miss,'column missing from full CYC schedule')
cabsched[miss]=''
cabsched=cabsched[myCols] # set in above preferred order
flist=np.ndarray.tolist(cabsched.Location.unique())
missing=[s for s in flist if s not in fields['Location'].tolist()]
if len(missing)>0:
print('Address missing from fields table:',','.join(missing))
# convert to desired string format here (write-read cycle makes it a string anyway)
# cabsched.Time=cabsched.Time.apply(lambda x:datetime.time.strftime(x, format='%I:%M %p'))
#cabsched['Date']=cabsched['Date'].dt.strftime(date_format='%d-%b-%y')
return cabsched
def detectschchange(sched1, sched2):
'''Compare two schedule versions and return unique rows (changed games)
'''
# Convert both to datetime/timestamps if in string format (probably %m/%d/%Y)
if type(sched1.iloc[0]['Date'])==str:
try:
sched1['Date']=sched1['Date'].apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
except:
print('Problem with string to datetime conversion for', sched1.iloc[0]['Date'])
if type(sched2.iloc[0]['Date'])==str:
try:
sched2['Date']=sched2['Date'].apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
except:
print('Problem with string to datetime conversion for', sched2.iloc[0]['Date'])
if type(sched2.iloc[0]['Time'])==str:
try:
# convert "%H:%M:%S" string directly to datetime.time
sched2['Time']=sched2['Time'].apply(lambda x:datetime.datetime.strptime(x, "%H:%M:%S").time())
except:
print('Problem with string to time conversion for', sched2.iloc[0]['Time'])
# all columns by default, false drops both duplicates leaving unique rows
bothsch=pd.concat([sched1,sched2])
alteredrows=bothsch.drop_duplicates(keep=False)
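# e.g. a game whose Time changed shows up twice in alteredrows: once with the old
# time (from sched1) and once with the new time (from sched2)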
alteredrows=alteredrows.sort_values(['Date','Time','Division'])
return alteredrows
def makefieldtable(df, fields):
''' Make separate table of field addresses for all fields in
given team's schedule (called by sendschedule)'''
venues=np.ndarray.tolist(df.Location.unique())
venues=[s.strip() for s in venues]
ft=pd.DataFrame()
ft['Location']=venues
ft=pd.merge(ft, fields, how='left', on=['Location'])
ft=ft[['Location','Address']]
return ft
def notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs):
''' Top level messaging function for notifying families of team assignment/ CYC card
+ optional short-team-player-recruiting
via custom e-mail; one per player
currently not including SMS
kwargs:
choice - 'send' or 'test'
recruit - T or F -- add recruiting statement for short teams
'''
choice=kwargs.get('choice','test')
if choice=='send':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('parent_email_log.txt','w', encoding='utf-8')
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
else:
school=myteams.loc[index]['Team'].split('#')[0]
coachinfo=''
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([row.Sport.lower(), row.Team, school, gradetostring(row.Graderange),
coachinfo, row.Playerlist])
# dict. with each team and its players
cards=findcards() # find all player cards
if not cards: # terminate if no cards are found (path error?)
print("Error opening CYC card image database")
return
# Separate notification for each signup is OK
for i, [sport, team, school, graderange, coachinfo, playerlist] in enumerate(teamlist):
thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
if '#' not in team:
# Cabrini team base message
thisteammess=blankmess
else: # base message for transferred players
thisteammess=blanktransmess
thisteamtitle=emailtitle
# Make team-specific replacements
for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$COACH', '$PLAYERLIST']):
thisteammess=thisteammess.replace(col, textwrap.fill(teamlist[i][j], width=100))
thisteamtitle=thisteamtitle.replace(col, teamlist[i][j])
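# e.g. an illustrative template line "Your $SPORT team is $TEAMNAME ($GRADERANGE), coached by $COACH"
# becomes "Your soccer team is ..." after these replacements (actual template text lives in messagefile)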
# Check if Cabrini team is short of players (max grade, sport, numplayers)
try:
recmess=makerecmess(team, thisteam['Grade'].max(), sport, len(thisteam))
except:
recmess='' # handles empty teams during testing
# Either blank inserted or generic needs more players request (same for whole team)
thisteammess=thisteammess.replace('$RECRUIT','\n'+recmess)
for index, row in thisteam.iterrows():
# Replace first name in e-mail title (default e-mail title is fall $SPORT for $FIRST)
thistitle=thisteamtitle.replace('$FIRST', row.First)
thistitle=thistitle.replace('$SPORT', row.Sport)
# Check for each players CYC card if necessary (also for older transfer teams)
thiscardmess=makecardmess(row, cards)
# custom message for individual player on this team
thismess=thisteammess.replace('$FIRST', row.First)
thismess=thismess.replace('$LAST', row.Last)
# message is blank if on file or not required and
thismess=thismess.replace('$CYCCARD', '\n'+thiscardmess)
recipients=getemailadds(row)
# Create custom email message (can have multiple sports in df)
if choice=='send':
# add From/To/Subject to actual e-mail and send the customized message once
msg=MIMEText(thismess,'plain')
# msg = MIMEMultipart('alternative') # message container
msg['Subject'] = thistitle
msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
msg['To'] = 'Cabrini Sports Parents <<EMAIL>>'
msg['Bcc']=','.join(recipients) # single e-mail or list
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
if not recipients:
print('No email address for ', row.First, row.Last)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thismess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
return
def makecardmess(row, cards):
''' Determine if card is needed and add generic message to that effect (called by emailparent_tk, notifyparent)
row is Series
'''
cmess=("$FIRST $LAST needs a CYC ID card to play on this team and we do not have one in our files."
"If your child already has this ID card, please take a picture of it and e-mail to <EMAIL>."
"If you don't have one, you can get one online at: https://idcards.cycstl.net/ or at uniform night. "
"For this you need: 1) picture of the child 2) child's birth certificate (or birth document) and 3) $5 fee")
if str(row.Plakey) in cards:
return '' # already on file
# Now handle teams that don't need CYC cards (generally K or 1st)
if '-' not in row.Team: # non-CYC level teams and transfer teams
if '#' not in row.Team: # non-CYC cabrini team
return '' # junior team doesn't require card
else: # determine grade level for transfer team
tempstr=row.Team
tempstr=tempstr.split('#')[1][0:1]
tempstr=tempstr.replace('K','0')
try:
grade=int(tempstr)
if grade<2: # judge dowd or junior transfer team
return ''
except:
print("couldn't determine grade for transfer team")
return ''
# all remaining players need a card
cmess=cmess.replace('$FIRST',row.First)
cmess=cmess.replace('$LAST',row.Last)
cmess=textwrap.fill(cmess, width=100)
return cmess
'''TESTING
makerecmess('teamname', 2, 'T-ball', 14)
textwrap.fill(recmess, width=80)
'''
def makerecmess(team, grade, sport, numplayers):
''' Figure out if team is short of players (based on grade level, sport, Cabteam or not)
'''
recmess=('This team could use more players. If you know anyone who is interested, '
'please inform us at <EMAIL>.')
recmess=textwrap.fill(recmess, width=100)
if '#' in team: # no recruiting for transfer teams
return ''
if grade=='K':
grade=0
else:
grade=int(grade)
if sport=='VB': # 8 for all grades
if numplayers<8:
return recmess
if sport=='Soccer':
if grade>=5: # 11v11 so need 16
if numplayers<16:
return recmess
elif grade<=4 and grade>=2: # 8v8 from 2nd to 4th so 12 is OK
if numplayers<13:
return recmess
elif grade==1: # 7v7 so 11 is OK
if numplayers<12:
return recmess
else: # k is 6v6 so 10 is OK
if numplayers<11:
return recmess
if sport=='Basketball': # 5v5 for all grades so 10 is good
if numplayers<11:
return recmess
if sport=='T-ball': # 9v9 ish so 13 is good
if numplayers<14:
return recmess
if sport=='Baseball': # 9v9 ish so 13 is good
if numplayers<14:
return recmess
if sport=='Softball': # 9v9 ish so 13 is good
if numplayers<14:
return recmess
return ''
def emailcoach_tk(teams, coaches, gdrivedict):
''' tk interface for e-mails to team coaches
some required datasets (players, famcontact, mastersignups) are directly loaded depending on choice
message types (mtypes) are:
unis - send summary of missing uniforms to team coaches
contacts - send contacts and current google drive link
bills - send summary of outstanding bills
'''
root = tk.Tk()
root.title('Send e-mail to coaches')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
billname=tk.StringVar() # file
try:
billfiles=glob.glob('billlist*')
if len(billfiles)>1:
billfile=findrecentfile(billfiles) # return single most recent file
else:
billfile=billfiles[0]
# find most recent billlist file name
billname.set(billfile)
except:
        billname.set('billlist.csv')
asstbool=tk.BooleanVar() # optional labelling of elements
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
choice=tk.StringVar() # test or send -mail
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Uniopts():
''' Disable irrelevant checkboxes '''
billentry.config(state=tk.DISABLED)
unientry.config(state=tk.NORMAL)
messfile.set('coach_email_outstanding_unis.txt')
# clear current team selector... this autoloads oldteams
for i, val in enumerate(teamdict):
teamlist[i].set(0)
emailtitle.set('Return of uniforms for your Cabrini team')
def Contactopts():
''' Disable irrelevant checkboxes '''
billentry.config(state=tk.DISABLED)
unientry.config(state=tk.DISABLED)
messfile.set('coach_email_contacts.txt')
emailtitle.set('Contact list for your Cabrini team')
def Billopts():
''' Disable irrelevant checkboxes '''
billentry.config(state=tk.NORMAL)
unientry.config(state=tk.DISABLED)
messfile.set('coach_email_outstanding_bills.txt')
emailtitle.set('Fees still owed by your Cabrini team')
def Otheropts():
''' Display relevant choices for other generic message to parents '''
billentry.config(state=tk.DISABLED)
unientry.config(state=tk.DISABLED)
messfile.set('temp_message.txt')
emailtitle.set('Message from Sponsors Club')
# e-mail title and message file name
rownum=0
tk.Label(root, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(root, textvariable=emailtitle)
titleentry.config(width=30)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(root, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(root, textvariable=messfile)
messentry.config(width=30)
messentry.grid(row=rownum, column=1)
rownum+=1
# Choose counts, deriv, both or peaks plot (radio1)
tk.Radiobutton(root, text='Missing uniforms', value='Unis', variable = mtype, command=Uniopts).grid(row=rownum, column=0)
tk.Radiobutton(root, text='Send contact info', value='Contacts', variable = mtype, command=Contactopts).grid(row=rownum, column=1)
tk.Radiobutton(root, text='Send bill info', value='Bills', variable = mtype, command=Billopts).grid(row=rownum, column=2)
tk.Radiobutton(root, text='Other message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=3)
rownum+=1
asstcheck=tk.Checkbutton(root, variable=asstbool, text='Email asst coaches?')
asstcheck.grid(row=rownum, column=0) # can't do immediate grid or nonetype is returned
rownum+=1
tk.Label(root, text='Bill_list file name').grid(row=rownum, column=0)
billentry=tk.Entry(root, textvariable=billname)
billentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(root, text='Missing uni file name').grid(row=rownum, column=0)
unientry=tk.Entry(root, textvariable=unifilename)
unientry.grid(row=rownum, column=1)
rownum+=1
# insert team selector
# Specific team selector section using checkboxes
teamdict=shortnamedict(teams)
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
        thisrow=i%5+1+rownum # five checkboxes per column layout
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(root, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(root, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(root, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
d=tk.Button(root, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(root, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(root, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(root, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
root.mainloop()
if choice.get()!='abort':
kwargs={}
if choice.get()=='KCtest':
kwargs.update({'KCtest':True})
kwargs.update({'choice':'send'})
else:
kwargs.update({'choice':choice.get()}) # send, KCtest (internal) or test (to log file)
if asstbool.get()==True:
kwargs.update({'asst':True}) # Optional send to asst. coaches if set to True
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
teams=teams.drop_duplicates(['Team','Sport'])
if mtype.get()=='Contacts':
mtype='contacts'
try:
Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
players= pd.read_csv('players.csv', encoding='cp437')
famcontact= pd.read_csv('family_contact.csv', encoding='cp437')
kwargs.update({'SUs':Mastersignups,'players':players,'famcontact':famcontact})
except:
print('Problem loading mastersignups, players, famcontact')
return
elif mtype.get()=='Bills':
mtype='bills'
try:
Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
                billlist=pd.read_csv(billname.get(), encoding='cp437')
                kwargs.update({'bills':billlist, 'SUs':Mastersignups})
except:
print('Problem loading billlist, mastersignups')
return
elif mtype.get()=='Unis':
mtype='unis'
try:
missing=pd.read_csv(unifilename.get(), encoding='cp437')
oldteams=pd.read_excel('Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
kwargs.update({'oldteams':oldteams,'missing':missing})
except:
print('Problem loading missingunis, oldteams')
return
elif mtype.get()=='Other':
# nothing special to load?
pass
emailcoaches(teams, coaches, mtype, emailtitle, messagefile, gdrivedict, **kwargs)
return
def maketextsched(sched,teams, coaches, fields, messagefile, logfile, **kwargs):
''' Concise textable game schedule for cell only people from extracted Cabrini schedule'''
# Convert dates/ times from timestamp to desired string formats for proper output
    if type(sched.iloc[0]['Time'])==datetime.time:
        sched.Time=sched.Time.apply(lambda x: x.strftime('%I:%M %p'))
    else:
        print('Time format is', type(sched.iloc[0]['Time']))
    if isinstance(sched.iloc[0]['Date'], pd.Timestamp):
        sched['Date']=sched['Date'].dt.strftime(date_format='%d-%b-%y')
    else:
        print('Date format is', type(sched.iloc[0]['Date']))
if 'div' in kwargs:
div=kwargs.get('div','')
grade=int(div[0])
if div[1].upper()=='G':
gender='f'
elif div[1].upper()=='B':
gender='m'
teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
log=open(logfile,'w', encoding='utf-8')
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
# Open generic message header
with open('messages\\'+messagefile, 'r') as file:
blankmess=file.read()
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
try:
coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
except:
coachinfo=''
else:
school=myteams.loc[index]['Team'].split('#')[0]
coachinfo=''
# Get gender
if row.Gender.lower()=='f':
gender='girls'
elif row.Gender.lower()=='m':
gender='boys'
else:
print('Problem finding team gender')
grrang=str(myteams.loc[index]['Graderange'])
if len(grrang)==2:
grrang=grrang[0]+'-'+grrang[1]
        if grrang.endswith('1'):
            grrang+='st'
        elif grrang.endswith('2'):
            grrang+='nd'
        elif grrang.endswith('3'):
            grrang+='rd'
        else:
            grrang+='th'
grrang=grrang.replace('0','K')
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([myteams.loc[index]['Sport'], myteams.loc[index]['Team'], school,
grrang, gender, coachinfo, myteams.loc[index]['Playerlist']])
# get dictionary of teams found/matched in CYC schedule
for i, [sport, team, school, graderange, gender, coachinfo, playerlist] in enumerate(teamlist):
# Either have cabrini only schedule or full CYC schedule
if 'Team' in sched:
thissched=sched[sched['Team']==team].copy()
thissched=thissched[['Date','Time','Day', 'Location']]
else:
print("Couldn't find schedule for", school, str(graderange), sport, team)
continue
if len(thissched)==0:
print('Games not found for ', team)
continue
# TODO construct textable message in log
games=''
for index, row in thissched.iterrows():
# output date, day, time, location
games+=row.Date+' '+row.Day+' '+row.Time+' '+row.Location+'\n'
thismess=blankmess.replace('$SCHEDULE', games)
thismess=thismess.replace('$GRADERANGE', graderange)
thismess=thismess.replace('$GENDER', gender)
thismess=thismess.replace('$SPORT', sport)
# now create/ insert location and address table
thisft=makefieldtable(thissched, fields)
myfields=''
for index, row in thisft.iterrows():
# output date, day, time, location
myfields+=row.Location+' '+row.Address+'\n'
thismess=thismess.replace('$FIELDTABLE', myfields)
log.write(thismess+'\n')
log.close()
return
''' TESTING
teamnamedict=findschteams(sched, teams, coaches)
'''
''' TESTING
sched=pd.read_csv('Cabrini_Bball2018_schedule.csv')
sport, team, school, graderange, gender, coachinfo, playerlist=teamlist[0] i=0
recipients=['<EMAIL>','<EMAIL>']
'''
def sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs):
''' Top level messaging function for notifying families of team assignment/ CYC card
+ optional short-team-player-recruiting
via custom e-mail; one per player
currently not including SMS
kwargs:
choice - 'send' or 'test' (defaults to test)
recruit - T or F -- add recruiting statement for short teams
mformat - not really yet used ... just sending as text not html
'''
# convert date- time from extracted schedule to desired str format
# type will generally be string (if reloaded) or timestamp (if direct from prior script)
''' if already string just keep format the same, if timestamp or datetime then convert below
if type(sched.iloc[0]['Time'])==str:
sched.Time=pd.to_datetime(sched.Time, format='%H:%M:%S') # convert string to timestamp
'''
if type(sched.iloc[0]['Time'])!=str:
# Then convert timestamp to datetime to desired string format
        sched.Time=sched.Time.apply(lambda x:pd.to_datetime(x).strftime('%I:%M %p'))
if type(sched.iloc[0]['Date'])==str:
try:
sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
except:
try:
sched.Date=pd.to_datetime(sched.Date, format='%Y-%m-%d')
except:
print('Difficulty converting date with format', type(sched.iloc[0]['Date']))
# convert to desired date string format
sched['Date']=sched['Date'].dt.strftime(date_format='%d-%b-%y')
choice=kwargs.get('choice','test')
if choice=='send' or choice=='KCtest':
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open(cnf._OUTPUT_DIR+'\\parent_email_log.txt','w', encoding='utf-8')
#%%
# this years signups only (later match for sport and team)
Mastersignups=Mastersignups[Mastersignups['Year']==year]
# Should be only one entry per coach
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
teamlist=[]
for index, row in myteams.iterrows():
# get school
if '#' not in myteams.loc[index]['Team']:
school='Cabrini'
try:
coachinfo=row.Fname+' '+ row.Lname+' ('+row.Email+')'
except:
coachinfo=''
else:
school=row.Team.split('#')[0]
coachinfo=''
# Get gender
if row.Gender.lower()=='f':
            gender='girls'
elif row.Gender.lower()=='m':
gender='boys'
else:
print('Problem finding team gender')
# Get sport, team, graderange, coach info (first/last/e-mail), playerlist
teamlist.append([row.Sport, row.Team, school, gradetostring(row.Graderange),
gender, coachinfo, row.Playerlist])
# get dictionary of teams found/matched in CYC schedule
teamnamedict=findschteams(sched, teams, coaches)
# TESTING sport, team, school, graderange, gender, coachinfo, playerlist=teamlist[i] i=1
#%%
for i, [sport, team, school, graderange, gender, coachinfo, playerlist] in enumerate(teamlist):
# Either have cabrini only schedule or full CYC schedule
if 'Team' in sched:
thissched=sched[sched['Team']==team].copy()
# shorten team name
thissched['Home']=thissched['Home'].str.split('/').str[0]
thissched['Away']=thissched['Away'].str.split('/').str[0]
thissched['Home']=thissched['Home'].str.strip()
thissched['Away']=thissched['Away'].str.strip()
# Times/dates already reformatted
thissched=thissched[['Date','Time','Day','Home','Away','Location']]
else: # handle if an unsorted CYC schedule (not Cab only)
if team in teamnamedict:
[div,schname]=teamnamedict.get(team,'')
thissched=getgameschedule(div,schname, sched)
thissched=thissched[['Date','Time','Day','Division','Home','Away','Location']]
else:
print("Couldn't find schedule for", school, str(graderange), sport, team)
continue
if len(thissched)==0:
print('Games not found for ', team)
continue
thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
# Make all team-specific replacements in message body and email title
thisteammess=blankmess
thistitle=emailtitle
# have to use caution due to $TEAMTABLE (common) and $TEAMNAME (rarely used)
for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$GENDER', '$COACH', '$PLAYERLIST']):
            if col!='$SPORT':
                val=teamlist[i][j]
            else: # lower-case sport name for replace
                val=teamlist[i][j].lower()
try:
thisteammess=thisteammess.replace(col, textwrap.fill(val, width=100))
thistitle=thistitle.replace(col, val)
except:
print("Problem with teamname", val)
continue
# Convert thissched to string table and insert into message
thisteammess=thisteammess.replace('$SCHEDULE', thissched.to_string(index=False, justify='left'))
#Make and insert field table
thisft=makefieldtable(thissched, fields)
thisteammess=thisteammess.replace('$FIELDTABLE', thisft.to_string(index=False, justify='left'))
# Get coach emails
recipients=getcoachemails(team, teams, coaches, **{'asst':True})
# Now get all unique team email addresses (single message to coach and team)...drops nan
recipients=getallteamemails(thisteam, recipients)
if choice=='KCtest': # internal send test
recipients=['<EMAIL>','<EMAIL>']
choice='send'
# Create custom email message (can have multiple sports in df)
if choice=='send':
try: # single simultaneous e-mail to all recipients
msg=MIMEText(thisteammess,'plain')
msg['Subject'] = thistitle
msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
msg['To']=','.join(recipients)
smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
print ('Message sent to ', ','.join(recipients))
except:
print('Message to ', team, 'failed.')
if not recipients:
print('No email addresses for team ', team)
else: # Testing mode ... just write to log w/o e-mail header and such
logfile.write(thistitle+'\n')
logfile.write(thisteammess+'\n')
# close log file (if testing mode)
if choice!='send':
logfile.close()
else:
pass
# TODO fix this attempted close
# smtpObj.quit() # close SMTP connection
#%%
return
# TESTING
#%%
def makegcals(sched, teams, coaches, fields, season, year, duration=1, **kwargs):
''' Turn standard CYC calendar into google calendar
description: 1-2 girls soccer vs opponent
kwargs:
div - get only calendar for given division
school - Cabrini ... drop transfer teams w/ #
splitcals - separate calendar for each team (default True),
'''
#TODO ... Test after alteration of address field
if 'school' in kwargs:
if kwargs.get('school','')=='Cabrini':
# drop transfer teams w/ #
teams=teams[~teams['Team'].str.contains('#')]
if 'div' in kwargs:
div=kwargs.get('div','')
grade=int(div[0])
if div[1].upper()=='G':
gender='f'
elif div[1].upper()=='B':
gender='m'
teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
# ensure correct formats for separate date and time columns
if type(sched.iloc[0]['Date'])==str:
try: # format could be 10/18/2018 0:00
sched.Date=sched.Date.str.split(' ').str[0]
sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
except:
pass
try:
sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
except:
pass
try:
sched.Date=pd.to_datetime(sched.Date, format='%Y-%m-%d')
except:
print('Problem converting date format of ', sched.iloc[0]['Date'])
    # gcal import expects dates as MM/DD/YYYY
sched['Date']=sched['Date'].dt.strftime(date_format='%m/%d/%Y')
''' Reformat of time shouldn't be required i.e. 4:30 PM
if type(sched.iloc[0]['Time'])==str:
try:
sched.Time=pd.to_datetime(sched.Time, format='%H:%M %p')
except:
try:
sched.Time=pd.to_datetime(sched.Time, format='%H:%M:%S') # convert string to timestamp
except:
print('Failed conversion of time column... check format')
'''
# Common reformatting of all gcals
sched=sched.rename(columns={'Date':'Start Date','Time':'Start Time'})
# Calculate end time while still a timestamp
    sched['End Time']=pd.to_datetime(sched['Start Time']) + datetime.timedelta(hours=duration)
sched['End Time']=pd.to_datetime(sched['End Time'])
sched['End Time']=sched['End Time'].apply(lambda x:pd.to_datetime(x).strftime('%I:%M %p'))
# Then convert timestamp to datetime to desired string format
    sched['Start Time']=sched['Start Time'].apply(lambda x:pd.to_datetime(x).strftime('%I:%M %p'))
# Standard google calendar column names
gcalcols=['Subject','Start Date', 'Start Time', 'End Date','End Time', 'All Day Event', 'Description', 'Location','Private']
sched['All Day Event']='FALSE'
sched['Private']='FALSE'
sched['End Date']=sched['Start Date']
# append short address to location field
sched=pd.merge(sched, fields, on='Location', how='left', suffixes=('','_2'))
# replace blank address (in case not found but shouldn't happen)
sched['Address']=sched['Address'].replace(np.nan,'')
sched['Location']=sched['Location']+' '+sched['Address']
# Cabrini extracted schedule has team name column
teamlist=np.ndarray.tolist(sched.Team.unique())
shortnames=shortnamedict2(teams)
# Get head coach email for team from coaches list
teams=pd.merge(teams, coaches, how='left', on=['Coach ID'], suffixes=('','_2'))
# Optional single calendar format
if not kwargs.get('splitcal', True):
combocal=pd.DataFrame(columns=gcalcols)
for i, team in enumerate(teamlist):
thissch=sched[sched['Team']==team]
# Need to get associated sport from teams
match=teams[teams['Team']==team]
if len(match)==1:
sport=match.iloc[0]['Sport']
email=match.iloc[0]['Email']
else:
sport=''
email=''
print('Sport not found for team', team)
# skip these teams (usually non-Cabrini team w/ Cab players)
continue
# Make unique description column
descr=shortnames.get(team,'')+' '+ sport.lower()
# Use 1-2nd girl soccer as calendar event title/subject
thissch['Subject']=descr
# Prepend grade/gender sport string to team opponents
thissch['Description']=thissch['Home'].str.split('/').str[0] +' vs '+thissch['Away'].str.split('/').str[0]
# prepend string 1-2 girls soccer to each event
thissch['Description']=thissch['Description'].apply(lambda x:descr+': '+x)
cancel='Contact '+str(email)+' for cancellation/reschedule info'
# Add line w/ coach email for cancellation
thissch['Description']=thissch['Description'].apply(lambda x:x+'\r\n'
+ cancel)
thissch=thissch[gcalcols]
if kwargs.get('splitcal', True): # separate save of gcal for each team
fname=cnf._OUTPUT_DIR+'\\gcal_'+descr+'.csv'
thissch.to_csv(fname, index=False)
else: # add to single jumbo cal
combocal=pd.concat([combocal, thissch], ignore_index=True)
if not kwargs.get('splitcal', True):
fname=cnf._OUTPUT_DIR+'\\Cabrini_gcal_'+season.lower()+str(year)+'.csv'
combocal.to_csv(fname, index=False)
return
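''' TESTING sketch (assumed args): write one google calendar csv per Cabrini team for fall season
makegcals(sched, teams, coaches, fields, 'Fall', 2018, school='Cabrini', splitcal=True)
'''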
#%%
def getgameschedule(div, schname, sched):
''' Find and extract game schedule for team with matching name '''
sched=sched.rename(columns={'Game Time':'Time','Division Name':'Division', 'Field Name':'Location','Visitor':'Away','AwayTeam':'Away','Home Team':'Home'})
thissched=sched[sched['Division'].str.startswith(div)]
thissched=thissched[(thissched['Home'].str.contains(schname)) | (thissched['Away'].str.contains(schname))]
# already sorted and date-time strings already converted to preferred format in getcabsch
return thissched
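''' TESTING sketch (assumed division/name strings of the kind produced by findschteams below)
thissched=getgameschedule('3G', 'Cabrini', sched)
'''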
def findschteams(sched, teams, coaches):
''' Find team names as listed in schedule or elsewhere based on division (e.g. 1B)
plus coach and/or school
return dictionary with all internal names and associated CYC schedule div & name '''
    sched=sched.rename(columns={'Game Time':'Time','Division Name':'Division','Field Name':'Location','AwayTeam':'Away','Home Team':'Home'})
sched=sched[pd.notnull(sched['Home'])] # drop unscheduled games
# Get identifying info for all teams
myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
# Make list of sport/team/school/graderange/coachinfo/playerlist
# Need double entry for double-rostered teams
double=myteams.copy()
double=double[double['Team'].str.contains('!')]
for index, row in double.iterrows():
doublename=row.Team
tok=doublename.split('-')
name1=tok[0]+'-'+tok[1]+'-'+tok[2].split('!')[0]+'-'+tok[3]
name2=tok[0]+'-'+tok[1]+'-'+tok[2].split('!')[1]+'-'+tok[3]
double=double.set_value(index, 'Team', name2)
myteams['Team']=myteams['Team'].str.replace(doublename, name1)
myteams=myteams.append(double)
# First find all Cabrini teams
#%%
teamnamedict={}
for index, row in myteams.iterrows():
# get division index=1 row= myteams.iloc[index]
if '-' in row.Team:
school='Cabrini'
coach=str(row.Lname)
try:
tok=row.Team.split('-')
div=tok[2]
except:
print("Couldn't find division for", row.Team)
continue
# non-cabrini team w/ transfers
elif '#' in row.Team:
school=row.Team.split('#')[0]
coach=str(row.Coach)
if '??' in coach:
coach='nan'
if row.Gender=='m':
div=str(row.Grade)+'B'
else:
div=str(row.Grade)+'G'
else: # typically junior teams
print("no schedule for ", row.Team)
continue
# Get sport, school, division, coach last nameteam, graderange, coach info (first/last/e-mail), playerlist
thisdiv=sched[sched['Division'].str.startswith(div)]
# On rare occasions teams can only have away games
divteams=np.ndarray.tolist(thisdiv['Home'].unique())
divteams.extend(np.ndarray.tolist(thisdiv['Away'].unique()))
divteams=set(divteams)
divteams=list(divteams)
# find this schools teams
thisteam=[team for team in divteams if school.lower() in team.lower()]
# handle multiple teams per grade
if len(thisteam)>1:
thisteam=[team for team in thisteam if coach.lower() in team.lower()]
if len(thisteam)>1: # Same last name? use exact coach match
coach=str(myteams.loc[index]['Coach'])
                thisteam=[team for team in thisteam if coach.lower() in team.lower()]
if len(thisteam)==1: # found unique name match
# Need division and name due to duplicates problem
try:
teamnamedict.update({row.Team: [div, thisteam[0].strip()]})
except:
print("Couldn't hash", row.Team)
else:
print("Couldn't find unique schedule team name for", row.Team, div)
#%%
return teamnamedict
def shortnamedict(teams):
''' From teams list, make shortened name dictionary for tk display (i.e. 1G-Croat or 3G-Ambrose)'''
teamdict={}
for index, row in teams.iterrows():
# Get coach name or school
if '#' in teams.loc[index]['Team']:
name=teams.loc[index]['Team'].split('#')[0]
else:
name=str(teams.loc[index]['Coach'])
if teams.loc[index]['Gender']=='m':
gend='B'
else:
gend='G'
grrange=str(teams.loc[index]['Graderange'])
grrange=grrange.replace('0','K')
thisname=grrange+gend+'-'+name
teamdict.update({teams.loc[index]['Team']:thisname})
return teamdict
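''' TESTING sketch of the short-name convention built above (hypothetical rows):
a grades 3-4 girls Cabrini team coached by Ambrose maps to '34G-Ambrose';
a transfer team 'StGabriel#...' maps to e.g. '5B-StGabriel'
teamdict=shortnamedict(teams)
'''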
def shortnamedict2(teams):
''' From teams list, make shortened name dictionary for gcal (i.e. 1-2 girls)'''
teamdict={}
for index, row in teams.iterrows():
if teams.loc[index]['Gender']=='m':
gend=' boys'
else:
gend=' girls'
grrange=str(teams.loc[index]['Graderange'])
grrange=grrange.replace('0','K')
if len(grrange)>1:
grrange=grrange[0]+'-'+grrange[1]
if grrange.endswith('2'):
grrange+='nd'
elif grrange.endswith('3'):
grrange+='rd'
try:
if int(grrange[-1]) in range(4,9):
grrange+='th'
except:
            pass
thisname=grrange+gend
teamdict.update({teams.loc[index]['Team']:thisname})
return teamdict
def findrecentfile(filelist):
''' Return most recently dated file from list of autonamed files .. date format is always 27Jan17 '''
dates=[s.split('_')[1].split('.')[0] for s in filelist]
try:
dates=[datetime.datetime.strptime(val, "%d%b%y") for val in dates]
datepos=dates.index(max(dates)) # position of newest date (using max)
newfile=filelist[datepos]
except:
print('File date comparison failed... using first one')
newfile=filelist[0]
return newfile
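''' TESTING sketch (hypothetical file names using the 27Jan17 date format noted above)
findrecentfile(['billlist_27Jan17.csv','billlist_03Feb17.csv']) # returns 'billlist_03Feb17.csv'
'''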
'''TESTING
missing=pd.read_csv('missingunilist_29Dec17.csv')
'''
def emailcoaches(teams, coaches, mtype, emailtitle, messagefile, gdrivedict, **kwargs):
''' Send e-mails to all coach: types are contacts, bills, unis (missing uniforms info)
various dfs are passed via kwargs when necessary
4/1/17 works for contacts and unpaid bill summary
HTML message w/ plain text alternate
kwargs: choice -- send, test or KCtest (real internal send )
'''
choice=kwargs.get('choice','test') # send or test (KCtest kwarg true set separately)
print(choice)
if choice=='send': # true send or internal live send to tkc@wustl
smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
smtpObj.ehlo() # say hello
smtpObj.starttls() # enable encryption for send
print('Enter password for sponsors club gmail ')
passwd=input()
smtpObj.login('<EMAIL>', passwd)
else: # testing only... open log file
logfile=open('coach_email_log.txt','w', encoding='utf-8')
# Iterate over teams
teaminfo=[] # list w/ most team info
if mtype=='unis' and 'missing' in kwargs:
# Iterate only over old teams with missing uniforms
missing=kwargs.get('missing',pd.DataFrame())
for index, row in missing.iterrows():
if row.Gender.lower()=='f':
gend='girls'
else:
gend='boys'
# coach and graderange are nan... not needed for missing unis
teaminfo.append([row.Year, row.Sport, gradetostring(row.Grade), gend, row.Team, 'coach',
'graderange', row.Number])
# Replace teams with oldteams
teams=kwargs.get('oldteams',pd.DataFrame())
else: # iterate through current teams for contacts, bills, cards
for index, row in teams.iterrows():
if row.Gender.lower()=='f':
gend='girls'
else:
gend='boys'
teaminfo.append([row.Year, row.Sport, gradetostring(row.Grade), gend, row.Team, row.Coach,
gradetostring(row.Graderange), row.Number])
with open(messagefile,'r') as mess:
message=mess.read() # generic e-mail message body with limited find/replace
# insert up to date google drive link for this season (various options)
if 'GDRIVE' in message: # global replacements (for all teams)
for key, val in gdrivedict.items():
message=message.replace(key, val)
for i, [year, sport, grade, gender, team, coach, graderange, numplayers] in enumerate(teaminfo):
# Make all team-specific replacements in message body and email title
thisteammess=message
thistitle=emailtitle
for j, col in enumerate(['$YEAR', '$SPORT', '$GRADE', '$GENDER', '$TEAMNAME', '$COACH', '$GRADERANGE', '$NUMBER']):
thisteammess=thisteammess.replace(col, str(teaminfo[i][j]))
thistitle=thistitle.replace(col, str(teaminfo[i][j]))
# Replace missing CYC cards list if requested by message
if '$MISSINGCARDS' in message:
if 'SUs' not in kwargs:
print('Signups needed to find missing cards')
return
carddict=findcards()
SUs=kwargs.get('SUs','')
missingstr=findmissingcards(team, SUs, carddict)
thisteammess=thisteammess.replace('$MISSINGCARDS', missingstr)
# Get head coach e-mail address (list optionally incl. assistants in kwargs)
if 'KCtest' not in kwargs:
coachemail=getcoachemails(team, teams, coaches, **kwargs)
else:
if i==0: # send first message only as live test
coachemail=['<EMAIL>','<EMAIL>']
else:
coachemail=[]
if coachemail==[]: # list of head and asst coaches
print('No valid coach e-mail for '+ team +'\n')
continue
# handle the special message cases
if mtype=='bills': # replacement of teamtable for bills
            if 'SUs' not in kwargs or 'bills' not in kwargs:
print('Signups and billing list needed for e-mail send bill to coaches option.')
return
SUs=kwargs.get('SUs','')
bills=kwargs.get('bills','')
teambilltable=makebilltable(team, bills, SUs)
if teambilltable=='': # team's all paid up, skip e-mail send
print('All players paid for team'+team+'\n')
continue # no e-mail message sent
thismess_html=thisteammess.replace('$TEAMTABLE', teambilltable.to_html(index=False))
thismess_plain=thisteammess.replace('$TEAMTABLE', teambilltable.to_string(index=False))
elif mtype=='contacts': # replacement of teamtable for contact
            if 'SUs' not in kwargs or 'players' not in kwargs or 'famcontact' not in kwargs:
print('Signups, player and family contact info needed for contact lists to coaches option.')
return
SUs=kwargs.get('SUs','')
# Collapse track sub-teams to single track team
SUs['Team']=SUs['Team'].str.replace(r'track\d+', 'Track', case=False)
SUs=SUs[SUs['Year']==year] # in case of duplicated team name, filter by year
players=kwargs.get('players','')
famcontact=kwargs.get('famcontact','')
contacttable=makecontacttable(team, SUs, players, famcontact)
# Find/replace e-mail addresses
# Convert df to text
thismess_html=thisteammess.replace('$TEAMTABLE', contacttable.to_html(index=False))
thismess_plain=thisteammess.replace('$TEAMTABLE', contacttable.to_string(index=False))
elif mtype=='unis': # replacement of teamtable for uniform returns
# Probably need current and former teams
            if 'SUs' not in kwargs or 'oldteams' not in kwargs or 'missing' not in kwargs:
print('Signups, old teams and missing uniform list needed for e-mail unis to coaches option.')
return
# in this case teams iterator with have old not current teams
unitable=makeunitable(team, missing)
thismess_html=thisteammess.replace('$TEAMTABLE', unitable.to_html(index=False))
thismess_plain=thisteammess.replace('$TEAMTABLE', unitable.to_string(index=False))
else: # generic message w/o $TEAMTABLE
thismess_html=thisteammess
thismess_plain=thisteammess
if choice=='send':
try:
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative') # message container
                msg['Subject'] = thistitle
msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
part1=MIMEText(thismess_plain,'plain')
                part2=MIMEText(thismess_html,'html')
msg['To']=','.join(coachemail) # single e-mail or list
msg.attach(part1) # plain text
msg.attach(part2) # html (last part is preferred)
# Simultaneous send to all in recipient list
smtpObj.sendmail('<EMAIL>', coachemail, msg.as_string())
print ('Message sent to ', ','.join(coachemail))
except:
print('Message to ', ','.join(coachemail), ' failed.')
else: # testing only... open log file
            logfile.write(thistitle+'\n')
logfile.write(thismess_plain+'\n')
return
def gradetostring(val):
''' Turns grade or grade ranges into strings with appropriate ending 23 becomes '2-3rd' '''
if len(str(val))==2:
val=str(val)[0]+'-'+str(val)[1]
else:
val=str(val)
if val.endswith('1'):
val+='st'
elif val.endswith('2'):
val+='nd'
elif val.endswith('3'):
val+='rd'
else:
val+='th'
return val
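''' TESTING sketch: single grades and two-digit ranges become ordinals
gradetostring(1)  # '1st'
gradetostring(23) # '2-3rd'
gradetostring(56) # '5-6th'
'''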
def getallteamemails(df, emails):
''' Get all unique e-mails associated with team
passed emails contains coach emails already extracted
email1, email2, and email3 columns all present in family contacts'''
    emails.extend(np.ndarray.tolist(df.Email1.unique()))
emails.extend(np.ndarray.tolist(df.Email2.unique()))
emails.extend(np.ndarray.tolist(df.Email3.unique()))
emails=set(emails)
emails=list(emails)
emails=[i for i in emails if str(i)!='nan']
return emails
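''' TESTING sketch (assumes thisteam carries the Email1/Email2/Email3 family contact columns)
getallteamemails(thisteam, ['<EMAIL>']) # coach e-mails plus unique family e-mails, NaNs dropped
'''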
def getcoachemails(team, teams, coaches, **kwargs):
''' Returns head coach e-mail for given team and optionally asst coaches '''
teams=teams.drop_duplicates('Team') # drop coed team duplicate
thisteam=teams[teams['Team']==team]
emails=[]
IDs=[]
if len(thisteam)!=1:
print(team, 'not found in current teams list')
return emails # blank list
thisteam=thisteam.dropna(subset=['Coach ID'])
if len(thisteam)!=1:
print('Coach ID not found for', team)
return emails # blank list
if thisteam.iloc[0]['Coach ID']!='': # possibly blank
IDs.append(thisteam.iloc[0]['Coach ID'])
thisteam=thisteam.dropna(subset=['AssistantIDs'])
if kwargs.get('asst', False): # optional send to asst coaches
if len(thisteam)==1: # grab asst IDs if they exist
asstIDs=thisteam.iloc[0]['AssistantIDs']
asstIDs=[str(s).strip() for s in asstIDs.split(",")]
IDs.extend(asstIDs)
# now find email addresses for this set of CYC IDs
thesecoaches=coaches[coaches['Coach ID'].isin(IDs)]
thesecoaches=thesecoaches.dropna(subset=['Email'])
emails=np.ndarray.tolist(thesecoaches.Email.unique()) # surely won't have blank string
return emails
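''' TESTING sketch (hypothetical team name; asst=True also pulls the AssistantIDs column)
getcoachemails('Cabrini-Soccer-3G-Smith', teams, coaches, asst=True)
'''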
def makeunitable(team, missing):
''' Make missing uniform table for auto-emailing to head coach; looping over old teams and
unis identified as not yet returned from prior seasons
'''
# Could be problem with non-unique team
thisteam=missing[missing['Team']==team]
mycols=['First', 'Last', 'Issue date', 'Sport', 'Year', 'Uniform#', 'Team']
thisteam=thisteam[mycols]
thisteam=thisteam.replace(np.nan,'')
return thisteam
def makecontacttable(team, SUs, players, famcontacts):
''' Make team contacts list for auto-emailing to head coach
first, last, grade, school, phone/text/email 1&2 '''
# Find subset of all signups from this team
thisteam=SUs[SUs['Team']==team]
# Get school from players
thisteam=pd.merge(thisteam, players, on='Plakey', how='left', suffixes=('','_r'))
# Get other contact info from family contacts
thisteam= | pd.merge(thisteam, famcontacts, on='Famkey', how='left', suffixes=('','_r')) | pandas.merge |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
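# Example (sketch): assert_stat_op_calc above is exercised by calls like the ones in
# TestDataFrameAnalytics.test_stat_op_calc further below, e.g.
#   assert_stat_op_calc('sum', np.sum, float_frame_with_na,
#                       skipna_alternative=np.nansum)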
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns containing NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import re
import numpy as np
import pandas as pd
import itertools
from collections import OrderedDict
from tqdm.auto import tqdm
import datetime
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from logging import getLogger
logger = getLogger("splt")
def load(DO_TEST = False):
"""raw csvとweaponなど外部データを読み込んでjoin"""
train = pd.read_csv("../data/train_data.csv")
test = | pd.read_csv('../data/test_data.csv') | pandas.read_csv |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datatable
from sklearn import metrics
import argparse
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RidgeClassifier
import xgboost as xgb
import shap
import random
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier
"""
Example use:
To output SHAP explanations for a given model
python3 machine_learning.py --target=likes --estimators=250 --language_model=roberta --ml_model=XGB --shap_out --mixed_features
To output cross validated model predictions
python3 machine_learning.py --target=likes --estimators=2 --language_model=glove --ml_model=all
To output cross validated model predictions with mixed features (features from language model + structured)
python3 machine_learning.py --target=likes --estimators=2 --language_model=glove --ml_model=all --mixed_features
--target: defines target variable
--estimators: number of estimators in machine learning classification models
--language_model: feature set provided by selected language model
--ml_model: machine learning classification model
--shap_out: enables only SHAP explanations for the selected ml_model and fold instead of cross validated tests
--mixed_features: enables to add structured features to a selected language model
To repeat experiments carried out in our work use two bash scripts:
bash ./grid_predict
bash ./grid_predict_mixed_features
"""
parser = argparse.ArgumentParser(description='Carry out machine learning')
parser.add_argument('--target', required=True, type=str, default='retweets', help="possible options:"
"likes, retweets, replies")
parser.add_argument('--language_model', required=True, type=str, default='glove',
help="possible options: fasttext, glove, distilbert, roberta, structured, all")
parser.add_argument('--estimators', required=False, type=int, default=250,
help="number of estimators in machine learning classification models")
parser.add_argument('--fold', required=False, type=int, default=0, help="choose fold in range: 0-4")
parser.add_argument('--shap_out', required=False, default=False, action='store_true', help='output SHAP explanations')
parser.add_argument('--ml_model', required=False, type=str, default='RF', help="possible options: RF, XGB, Ridge, all")
parser.add_argument('--mixed_features', required=False, action='store_true', default=False,
help="enables to add structured features to a selected language model")
args = parser.parse_args()
_target = args.target
_estimators = args.estimators
_fold = args.fold
_language_model = args.language_model
_ml_model = args.ml_model
_shap_out = args.shap_out
_mixed_features = args.mixed_features
# read source data
source_data = datatable.fread("./data/from_USNavy_for_flair.csv").to_pandas()
# define which classification models to use
ml_model_selector = {
"XGB": xgb.XGBClassifier(objective='multi:softprob', n_jobs=24, learning_rate=0.03,
max_depth=10, subsample=0.7, colsample_bytree=0.6,
random_state=2020, n_estimators=_estimators),
"RF": RandomForestClassifier(n_estimators=_estimators, max_depth=7, min_samples_split=2,
min_samples_leaf=1, max_features='auto', n_jobs=-1, random_state=2020),
"Ridge": RidgeClassifier(),
"MLP": MLPClassifier(hidden_layer_sizes=(8, 8, 8), activation='relu', solver='adam', max_iter=2000)
}
# define timestamp as index
source_data['timestamp'] = | pd.to_datetime(source_data['timestamp'], errors='coerce') | pandas.to_datetime |
# -*- coding: utf-8 -*-
# scripts
import wikilanguages_utils
# time
import time
import datetime
# system
import os
import sys
import re
from IPython.display import HTML
# databases
import MySQLdb as mdb, MySQLdb.cursors as mdb_cursors
import sqlite3
# files
import codecs
# requests and others
import requests
import urllib
# data
import pandas as pd
# pywikibot
import pywikibot
PYWIKIBOT2_DIR = '/srv/wcdo/user-config.py'
from dash import Dash
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_table_experiments as dt
from dash.dependencies import Input, Output, State
class Logger(object): # this prints both the output to a file and to the terminal screen.
def __init__(self):
self.terminal = sys.stdout
self.log = open("meta_update.out", "w")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self): pass
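# Typical wiring for Logger (illustrative sketch, left commented out here):
# sys.stdout = Logger()
# print('this message now goes both to the terminal and to meta_update.out')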
# MAIN
######################## WCDO CREATION SCRIPT ########################
def main():
publish_wcdo_update_meta_pages()
######################################################################
### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### --- ### ---
# TABLES
# function name composition rule: x, y, (rows, columns)
# In this function we create the table language_territories_mapping.
def make_table_language_territories_mapping():
df = pd.read_csv(databases_path + 'language_territories_mapping.csv',sep='\t',na_filter = False)
df = df[['territoryname','territorynameNative','QitemTerritory','languagenameEnglishethnologue','WikimediaLanguagecode','demonym','demonymNative','ISO3166','ISO31662','regional','country','indigenous','languagestatuscountry','officialnationalorregional']]
territorylanguagecodes_original = list(df.WikimediaLanguagecode.values)
indexs = df.index.values.tolist()
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('-','_')
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('be_tarask', 'be_x_old')
df.WikimediaLanguagecode = df['WikimediaLanguagecode'].str.replace('nan', 'zh_min_nan')
languagenames={}
updated_langcodes = list(df.WikimediaLanguagecode.values)
for x in range(0,len(territorylanguagecodes_original)):
curindex = indexs[x]
languagenames[curindex]=languages.loc[updated_langcodes[x]]['languagename']
df['Language Name'] = pd.Series(languagenames)
languagecodes={}
for x in range(0,len(territorylanguagecodes_original)):
curindex = indexs[x]
curlanguagecode = territorylanguagecodes_original[x]
languagecodes[curindex]=curlanguagecode
df['WikimediaLanguagecode'] = pd.Series(languagecodes)
# languagenames_local={}
# for languagecode in territorylanguagecodes:
# languagenames_local[languagecode]=languages.loc[languagecode]['languagename']
# df['Language Local'] = pd.Series(languagenames_local)
df = df.reset_index()
df = df.fillna('')
qitems={}
indexs = df.index.tolist()
qitems_list = list(df.QitemTerritory.values)
for x in range(0,len(qitems_list)):
curqitem = qitems_list[x]
curindex = indexs[x]
if curqitem != None and curqitem!='': qitems[curindex]='[[wikidata:'+curqitem+'|'+curqitem+']]'
else: qitems[curindex]=''
df['Qitems'] = pd.Series(qitems)
columns = ['Language Name','WikimediaLanguagecode','Qitems','territorynameNative','demonymNative','ISO3166','ISO31662']
# columns = ['Language Name','WikimediaLanguagecode','Qitems','territoryname','territorynameNative','demonymNative','ISO3166','ISO31662','country']
df = df[columns] # selecting the parameters to export
# print (df.head())
    columns_dict = {'Language Name':'Language','WikimediaLanguagecode':'Wiki','Qitems':'WD Qitem','territoryname':'Territory','territorynameNative':'Territory (Local)','demonymNative':'Demonyms (Local)','ISO3166':'ISO 3166', 'ISO31662':'ISO 3166-2','country':'Country'}
df=df.rename(columns=columns_dict)
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
header_string = header_string + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
row_string = row_string + str(value) + add # here is the value
# here we might add colors. -> it would be nice to make a different colour for each language background, so it would be easy to see when one starts and another finishes.
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
wikitext = '* Generated at '+datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S')+'\n'
wikitext += wiki_table_string
return wikitext
def make_table_ccc_extent_all_languages():
# QUESTION: What is the extent of cultural context content in each language edition?
    # percentage of unique content (without any ILL) -> consider whether to add it to the extent table.
# OBTAIN AND FORMAT THE DATA.
conn = sqlite3.connect(databases_path + 'wcdo_stats.db'); cursor = conn.cursor()
df = pd.DataFrame(wikilanguagecodes)
df = df.set_index(0)
reformatted_wp_numberarticles = {}
for languagecode,value in wikipedialanguage_numberarticles.items():
reformatted_wp_numberarticles[languagecode]='{:,}'.format(int(value))
df['wp_number_articles']= pd.Series(reformatted_wp_numberarticles)
# CCC %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "wp" AND set2descriptor = "ccc" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
rank_dict = {}; i=1
lang_dict = {}
abs_rel_value_dict = {}
for row in cursor.execute(query):
lang_dict[row[0]]=languages.loc[row[0]]['languagename']
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
rank_dict[row[0]]=i
i=i+1
df['Language'] = pd.Series(lang_dict)
df['Nº'] = pd.Series(rank_dict)
df['ccc_number_articles'] = pd.Series(abs_rel_value_dict)
# CCC GL %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "wp" AND set2descriptor = "ccc_geolocated" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
abs_rel_value_dict = {}
for row in cursor.execute(query):
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
df['geolocated_number_articles'] = pd.Series(abs_rel_value_dict)
# CCC KW %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "wp" AND set2descriptor = "ccc_keywords" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
abs_rel_value_dict = {}
for row in cursor.execute(query):
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
df['keyword_title'] = pd.Series(abs_rel_value_dict)
# CCC People %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "wp" AND set2descriptor = "ccc_people" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
abs_rel_value_dict = {}
for row in cursor.execute(query):
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
df['people_ccc_percent'] = pd.Series(abs_rel_value_dict)
# CCC Female %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "ccc" AND set2descriptor = "female" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
female_abs_value_dict = {}
for row in cursor.execute(query):
female_abs_value_dict[row[0]]=row[1]
df['female_ccc'] = pd.Series(female_abs_value_dict)
# CCC Male %
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE set1descriptor = "ccc" AND set2descriptor = "male" AND content = "articles" AND set1=set2 AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC'
male_abs_value_dict = {}
for row in cursor.execute(query):
male_abs_value_dict[row[0]]=row[1]
df['male_ccc'] = pd.Series(male_abs_value_dict)
df = df.fillna(0)
df['male_ccc'] = df.male_ccc.astype(str)
df['female_ccc'] = df.female_ccc.astype(str)
df['people_ccc_percent'] = df.people_ccc_percent.astype(str)
female_male_CCC={}
for x in df.index.values.tolist():
sumpeople = int(float(df.loc[x]['male_ccc']))+int(float(df.loc[x]['female_ccc']))
if sumpeople != 0:
female_male_CCC[x] = str(round(100*int(float(df.loc[x]['female_ccc']))/sumpeople,1))+'%\t-\t'+str(round(100*int(float(df.loc[x]['male_ccc']))/sumpeople,1))+'%'
else:
female_male_CCC[x] = '0.0%'+'\t-\t'+'0.0%'
df['female-male_ccc'] = pd.Series(female_male_CCC)
df['Region']=languages.region
for x in df.index.values.tolist():
if ';' in df.loc[x]['Region']: df.at[x, 'Region'] = df.loc[x]['Region'].split(';')[0]
WPlanguagearticle={}
for x in df.index.values: WPlanguagearticle[x]='[[:'+x.replace('_','-')+':|'+x.replace('_','-')+']]'
df['Wiki'] = pd.Series(WPlanguagearticle)
languagelink={}
for x in df.index.values:
languagelink[x]='[[w:'+languages.loc[x]['WikipedialanguagearticleEnglish'].split('/')[4].replace('_',' ')+'|'+languages.loc[x]['languagename']+']]'
df['Language'] = pd.Series(languagelink)
# Renaming the columns
columns_dict = {'wp_number_articles':'Articles','ccc_number_articles':'CCC (%)','geolocated_number_articles':'CCC GL (%)','keyword_title':'KW Title (%)','female-male_ccc':'CCC Female-Male %','people_ccc_percent':'CCC People (%)'}
df=df.rename(columns=columns_dict)
df = df.reset_index()
# Choosing the final columns
columns = ['Nº','Language','Wiki','Articles','CCC (%)','CCC GL (%)','KW Title (%)','CCC People (%)','CCC Female-Male %','Region']
df = df[columns] # selecting the parameters to export
# WIKITEXT
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
dict_data_type = {'CCC (%)':'data-sort-type="number"|','CCC GL (%)':'data-sort-type="number"|','KW Title (%)':'data-sort-type="number"|','CCC People (%)':'data-sort-type="number"|','CCC Female-Male %':'data-sort-type="number"|'}
header_string = '!'
for x in range(0,len(df_columns_list)):
if x == len(df_columns_list)-1: add = ''
else: add = '!!'
data_type = ''
if df_columns_list[x] in dict_data_type: data_type = ' '+dict_data_type[df_columns_list[x]]
header_string = header_string + data_type + df_columns_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
row_string = row_string + str(value) + add # here is the value
# here we might add colors.
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
wikitext = '* Statistics at '+datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S')+'\n'
wikitext += wiki_table_string
return wikitext
def make_table_langs_langs_ccc(): # COVERAGE
# QUESTION: How well each language edition covers the CCC of each other language edition?
# OBTAIN THE DATA.
conn = sqlite3.connect(databases_path + 'wcdo_stats.db'); cursor = conn.cursor()
coverage_art = {}
t_coverage = {}
query = 'SELECT set2, abs_value, rel_value FROM wcdo_intersections WHERE set1="all_ccc_articles" AND set1descriptor="" AND set2descriptor="wp" ORDER BY set2;'
for row in cursor.execute(query):
coverage_art[row[0]]=row[1]
t_coverage[row[0]]=round(row[2],2)
r_coverage = {}
query = 'SELECT set2, rel_value FROM wcdo_intersections WHERE set1="all_ccc_avg" AND set1descriptor="" AND set2descriptor="wp" ORDER BY set2;'
for row in cursor.execute(query):
r_coverage[row[0]]=round(row[1],2)
language_dict={}
query = 'SELECT set2, set1, rel_value FROM wcdo_intersections WHERE content="articles" AND set1descriptor="ccc" AND set2descriptor = "wp" AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY set2, abs_value DESC;'
ranking = 5
row_dict = {}
i=1
languagecode_covering='aa'
for row in cursor.execute(query):
cur_languagecode_covering=row[0]
if cur_languagecode_covering!=languagecode_covering: # time to save
row_dict['language']=languages.loc[languagecode_covering]['languagename']
row_dict['WP articles']='{:,}'.format(int(wikipedialanguage_numberarticles[languagecode_covering]))
row_dict['relative_coverage_index']=r_coverage[languagecode_covering]
row_dict['total_coverage_index']=t_coverage[languagecode_covering]
row_dict['coverage_articles_sum']='{:,}'.format(int(coverage_art[languagecode_covering]))
language_dict[languagecode_covering]=row_dict
row_dict = {}
i = 1
if i <= ranking:
languagecode_covered=row[1]
rel_value=round(row[2],2)
languagecode_covered = languagecode_covered.replace('be_tarask','be_x_old')
languagecode_covered = languagecode_covered.replace('zh_min_nan','nan')
languagecode_covered = languagecode_covered.replace('zh_classical','lzh')
languagecode_covered = languagecode_covered.replace('_','-')
value = languagecode_covered + ' ('+str(rel_value)+'%)'
if rel_value == 0: value ='<small>0</small>'
else: value = '<small>'+value+'</small>'
row_dict[str(i)]=value
i+=1
languagecode_covering = cur_languagecode_covering
column_list_dict = {'language':'Language', 'WP articles':'Articles','1':'nº1','2':'nº2','3':'nº3','4':'nº4','5':'nº5','relative_coverage_index':'R.Coverage','total_coverage_index':'T.Coverage','coverage_articles_sum':'Coverage Art.'}
column_list = ['Language','Articles','nº1','nº2','nº3','nº4','nº5','R.Coverage','T.Coverage','Coverage Art.']
df=pd.DataFrame.from_dict(language_dict,orient='index')
df=df.rename(columns=column_list_dict)
df = df[column_list] # selecting the parameters to export
df = df.fillna('')
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
# WIKITEXT
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
dict_data_type = {'CCC %':'data-sort-type="number"|'}
header_string = '!'
for x in range(0,len(column_list)):
if x == len(column_list)-1: add = ''
else: add = '!!'
data_type = ''
if df_columns_list[x] in dict_data_type: data_type = ' '+dict_data_type[df_columns_list[x]]
header_string = header_string + data_type + column_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
if value == '': value = 0
row_string = row_string + str(value) + add # here is the value
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
# Returning the Wikitext
wikitext = '* Statistics at '+datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S')+'\n'
wikitext += wiki_table_string
return wikitext
def make_table_langs_ccc_langs(): # SPREAD
# QUESTION: How well each language edition CCC is spread in other language editions?
# TABLE COLUMN (spread):
# language, CCC%, RANKING TOP 5, relative spread index, total spread index, spread articles sum.
# relative spread index: the average of the percentages it occupies in other languages.
# total spread index: the overall percentage of spread of the own CCC articles. (sum of x-lang CCC in every language / sum of all articles in every language)
# spread articles sum: the number of articles from this language CCC in all languages.
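    # Illustrative reading (hypothetical numbers): if a CCC accounts for 2%, 1% and 3%
    # of three other editions, its relative spread index is their average, 2.0, while
    # the total spread index divides the summed CCC articles found in those editions
    # by their summed article counts.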
# OBTAIN THE DATA.
conn = sqlite3.connect(databases_path + 'wcdo_stats.db'); cursor = conn.cursor()
ccc_percent_wp = {}
query = 'SELECT set1, abs_value, rel_value FROM wcdo_intersections WHERE content="articles" AND set1 = set2 AND set1descriptor="wp" AND set2descriptor = "ccc";'
for row in cursor.execute(query):
value = row[1]
if value == None: value = 0
ccc_number_articles = '{:,}'.format(int(value))
value2 = row[2]
if value2 == None: value2 = 0
ccc_percent_wp[row[0]]=ccc_number_articles+' <small>'+'('+str(round(value2,2))+'%)</small>'
spread_art = {}
t_spread = {}
query = 'SELECT set2, abs_value, rel_value FROM wcdo_intersections WHERE content="articles" AND set1="all_wp_all_articles" AND set1descriptor="" AND set2descriptor="ccc" ORDER BY set2;'
for row in cursor.execute(query):
spread_art[row[0]]=row[1]
t_spread[row[0]]=round(row[2],2)
r_spread = {}
query = 'SELECT set2, rel_value FROM wcdo_intersections WHERE content="articles" AND set1="all_wp_avg" AND set1descriptor="" AND set2descriptor="ccc" ORDER BY set2;'
for row in cursor.execute(query):
r_spread[row[0]]=round(row[1],2)
language_dict={}
query = 'SELECT set2, set1, rel_value FROM wcdo_intersections WHERE content="articles" AND set2descriptor="ccc" AND set1descriptor = "wp" AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY set2, abs_value DESC;'
ranking = 5
row_dict = {}
i=1
languagecode_spreading='aa'
for row in cursor.execute(query):
cur_languagecode_spreading=row[0]
if row[0]==row[1]: continue
if cur_languagecode_spreading!=languagecode_spreading: # time to save
row_dict['language']=languages.loc[languagecode_spreading]['languagename']
try:
row_dict['CCC articles']=ccc_percent_wp[languagecode_spreading]
except:
row_dict['CCC articles']=0
try:
row_dict['relative_spread_index']=r_spread[languagecode_spreading]
except:
row_dict['relative_spread_index']=0
try:
row_dict['total_spread_index']=t_spread[languagecode_spreading]
except:
row_dict['total_spread_index']=0
try:
row_dict['spread_articles_sum']='{:,}'.format(int(spread_art[languagecode_spreading]))
except:
row_dict['spread_articles_sum']=0
language_dict[languagecode_spreading]=row_dict
row_dict = {}
i = 1
# input('')
if i <= ranking:
languagecode_spread=row[1]
rel_value=round(row[2],2)
languagecode_spread = languagecode_spread.replace('be_tarask','be_x_old')
languagecode_spread = languagecode_spread.replace('zh_min_nan','nan')
languagecode_spread = languagecode_spread.replace('zh_classical','lzh')
languagecode_spread = languagecode_spread.replace('_','-')
value = languagecode_spread + ' ('+str(rel_value)+'%)'
if rel_value == 0: value ='<small>0</small>'
else: value = '<small>'+value+'</small>'
row_dict[str(i)]=value
# print (cur_languagecode_spreading,languagecode_spread,i,value)
languagecode_spreading = cur_languagecode_spreading
i+=1
column_list_dict = {'language':'Language', 'CCC articles':'CCC %','1':'nº1','2':'nº2','3':'nº3','4':'nº4','5':'nº5','relative_spread_index':'R.Spread','total_spread_index':'T.Spread','spread_articles_sum':'Spread Art.'}
column_list = ['Language','CCC %','nº1','nº2','nº3','nº4','nº5','R.Spread','T.Spread','Spread Art.']
df=pd.DataFrame.from_dict(language_dict,orient='index')
df=df.rename(columns=column_list_dict)
df = df[column_list] # selecting the parameters to export
df = df.fillna('')
df_columns_list = df.columns.values.tolist()
df_rows = df.values.tolist()
# WIKITEXT
class_header_string = '{| border="1" cellpadding="2" cellspacing="0" style="width:100%; background: #f9f9f9; border: 1px solid #aaaaaa; border-collapse: collapse; white-space: nowrap; text-align: right" class="sortable"\n'
dict_data_type = {'CCC %':'data-sort-type="number"|','nº1':'data-sort-type="number"|','nº2':'data-sort-type="number"|','nº3':'data-sort-type="number"|','nº4':'data-sort-type="number"|','nº5':'data-sort-type="number"|'}
header_string = '!'
for x in range(0,len(column_list)):
if x == len(column_list)-1: add = ''
else: add = '!!'
data_type = ''
if df_columns_list[x] in dict_data_type: data_type = ' '+dict_data_type[df_columns_list[x]]
header_string = header_string + data_type + column_list[x] + add
header_string = header_string + '\n'
rows = ''
for row in df_rows:
midline = '|-\n'
row_string = '|'
for x in range(0,len(row)):
if x == len(row)-1: add = ''
else: add = '||'
value = row[x]
if value == '': value = 0
color = ''
row_string = row_string + str(value) + add # here is the value
row_string = midline + row_string + '\n'
rows = rows + row_string
closer_string = '|}'
wiki_table_string = class_header_string + header_string + rows + closer_string
# Returning the Wikitext
wikitext = '* Statistics at '+datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S')+'\n'
wikitext += wiki_table_string
return wikitext
def make_table_geolocated_articles():
conn = sqlite3.connect(databases_path + 'wcdo_stats.db'); cursor = conn.cursor()
country_names, regions, subregions = wikilanguages_utils.load_iso_3166_to_geographical_regions()
country_names_copy = list(country_names.keys())
# COUNTRIES
# all qitems queries
query = 'SELECT set2descriptor, abs_value, rel_value FROM wcdo_intersections WHERE set1 = "wikidata_article_qitems" AND set1descriptor = "geolocated" AND set2 = "countries" AND content = "articles" AND measurement_date IN (SELECT MAX(measurement_date) FROM wcdo_intersections) ORDER BY rel_value DESC;'
rank_dict = {}; i=1
abs_rel_value_dict = {}
for row in cursor.execute(query):
abs_rel_value_dict[row[0]]=' '+str('{:,}'.format(int(row[1]))+' '+'<small>('+str(round(row[2],2))+'%)</small>')
rank_dict[row[0]]=str(i)
i=i+1
country_names_copy.remove(row[0])
for country_code in country_names_copy:
rank_dict[country_code]=str(i)
abs_rel_value_dict[country_code]=' 0 <small>(0.0%)</small>'
i=i+1
df = pd.DataFrame.from_dict(country_names,orient='index')
df['Country'] = pd.Series(country_names)
df['Nº'] = pd.Series(rank_dict)
df['Geolocated Qitems (%)'] = | pd.Series(abs_rel_value_dict) | pandas.Series |
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import time
#Loaded Data
train_data = pd.read_csv("../input/train.csv")
test_data = pd.read_csv("../input/test.csv")
test_dataWithId = pd.read_csv("../input/test.csv")
#Training data
y = train_data[['Cover_Type']]
X = train_data.drop(['Cover_Type'], axis=1)
#Drop Id from Training and Test data
X = X.drop(['Id'], axis=1)
test_data = test_data.drop(['Id'], axis=1)
idx = 10
cols = list(X.columns.values)[:idx]
X[cols] = StandardScaler().fit_transform(X[cols])
test_data[cols] = StandardScaler().fit_transform(test_data[cols])
# svm_parameters = [{'kernel': ['rbf'], 'C': [1,10,100,1000]}]
# model = GridSearchCV(SVC(), svm_parameters, cv=3, verbose=2)
# model.fit(X, y.iloc[:,0])
# print(model.best_params_)
# print(model.cv_results_)
model = SVC(C=1000, kernel='rbf')
model.fit(X, y.iloc[:,0])
print(model.score(X, y.iloc[:,0]))
predictions = model.predict(test_data)
print(predictions)
c1 = | pd.DataFrame(test_dataWithId["Id"]) | pandas.DataFrame |
import tensorflow as tf
import pandas as pd
import numpy as np
import time
import os
def train(drop_prob, dataset_test, normal_scale, sav=True, checkpoint_file='default.ckpt'):
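    """
    Restore a pretrained two-layer fully connected network (4000 sigmoid units ->
    dropout -> RNA_size sigmoid outputs) from `checkpoint_file` and run it on the
    non-RNA columns of `dataset_test`, returning the reconstruction together with
    placeholder loss values. `batch_shape_input`, `batch_shape_output`, `RNA_size`
    and `lr` are assumed to be module-level globals defined elsewhere in this script.
    """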
input_image = tf.placeholder(tf.float32, batch_shape_input, name='input_image')
is_training = tf.placeholder(tf.bool)
scale = 0.
with tf.variable_scope('FCN') as scope:
fc_1 = tf.layers.dense(inputs=input_image, units=4000,
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=scale))
fc_1_out = tf.nn.sigmoid(fc_1)
fc_1_dropout = tf.layers.dropout(inputs=fc_1_out, rate=drop_prob, training=is_training)
fc_2_dropout = tf.layers.dense(inputs=fc_1_dropout, units=RNA_size) # 46744
fc_2_out = tf.nn.sigmoid(fc_2_dropout) # fc_2_dropout #
reconstructed_image = fc_2_out # fc_2_dropout
original = tf.placeholder(tf.float32, batch_shape_output, name='original')
loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(reconstructed_image, original))))
l2_loss = tf.losses.get_regularization_loss()
optimizer = tf.train.AdamOptimizer(lr).minimize(loss + l2_loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
start = time.time()
    loss_val_list_train = 0
    loss_val_list_test = 0
    loss_test = 0
    loss_test_pretrain = 0  # returned below; kept at 0 since no training is run here
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as session:
session.run(init)
print(("Loading variables from '%s'." % checkpoint_file))
saver.restore(session, checkpoint_file)
print('restored')
############### test the pretrain model for target dataset
dataset_test = np.asarray(dataset_test).astype("float32")
reconstruct = session.run(reconstructed_image,
feed_dict={input_image: dataset_test[:, RNA_size:], is_training: False})
end = time.time()
el = end - start
print(("Time elapsed %f" % el))
return (loss_val_list_train, loss_val_list_test, loss_test, loss_test_pretrain, reconstruct)
#############################################################################################################
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
original_dat_path_DNA = '/data0/zhoux/DNA_WT.csv'
imputed_dataset_path = '/data0/zhoux/imputed_RNA_WT.csv'
DNA_target = | pd.read_csv(original_dat_path_DNA, delimiter=',',index_col=0, header=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import dask
import scipy
import time
from functools import partial
from abc import ABCMeta, abstractmethod
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import point_in_polygon
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF, DotProduct, WhiteKernel
import factorialModel
import loadData
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d, griddata
import SSVI
import bootstrapping
#######################################################################################################
class InterpolationModel(factorialModel.FactorialModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
#Build the learner
def buildModel(self):
#raise NotImplementedError()
return
def trainWithSession(self, session, inputTrain, nbEpoch, inputTest = None):
raise NotImplementedError("Not a tensorflow model")
return super().trainWithSession(session,
inputTrain,
nbEpoch,
inputTest = inputTest)
def train(self, inputTrain, nbEpoch, inputTest = None):
#Do nothing
return np.array([0.0])
def evalModelWithSession(self, sess, inputTest):
raise NotImplementedError("Not a tensorflow model")
return super().evalModelWithSession(sess, inputTest)
def evalModel(self, inputTestList):
#No loss since we interpolate exactly
inputTest = inputTestList[0]
coordinates = inputTestList[1]
loss = pd.Series(np.zeros(inputTest.shape[0]), index = inputTest.index)
#Return the inputs as compressed values
inputs = inputTest.apply(lambda x : self.interpolate(x, coordinates.loc[x.name]), axis=1)
#We do not have any factors so we assign a dummy value of 1
factors = pd.DataFrame(np.ones((inputTest.shape[0],self.nbFactors)),
index=inputTest.index)
return loss, inputs, factors
def getWeightAndBiasFromLayer(self, layer):
raise NotImplementedError("Not a tensorflow model")
return super().getWeightAndBiasFromLayer(layer)
#Interpolate or extrapolate certain values given the knowledge of other ones
def interpolate(self, incompleteSurface, coordinates):
raise NotImplementedError()
return pd.Series()
def completeDataTensor(self,
sparseSurfaceList,
initialValueForFactors,
nbCalibrationStep):
# knownValues = sparseSurface.dropna()
# locationToInterpolate = sparseSurface[sparseSurface.isna()].index
sparseSurface = sparseSurfaceList[0]
coordinates = sparseSurfaceList[1]
interpolatedValues = self.interpolate(sparseSurface, coordinates)
#Not a factorial model, we assign a dummy value
bestFactors = np.ones(self.nbFactors)
#Exact inteprolation
calibrationLoss = 0.0
calibrationSerie = pd.Series([calibrationLoss])
#Complete surface with inteporlated values
bestSurface = interpolatedValues
return calibrationLoss, bestFactors, bestSurface, calibrationSerie
#Interpolation does not assume any factors but relies on some known values
def evalSingleDayWithoutCalibrationWithSensi(self, initialValueForFactors, dataSetList):
raise NotImplementedError("Not a factorial model")
return super().evalSingleDayWithoutCalibrationWithSensi(initialValueForFactors, dataSetList)
def plotInterpolatedSurface(self,valueToInterpolate, calibratedFactors,
colorMapSystem=None,
plotType=None):
raise NotImplementedError("Not a factorial model")
return
def evalInterdependancy(self, fullSurfaceList):
raise NotImplementedError("Not a Factorial model")
return
def evalSingleDayWithoutCalibration(self, initialValueForFactors, dataSetList):
raise NotImplementedError("Not a Factorial model")
return
#ToolBox
#######################################################################################################
def getMaskedPoints(incompleteSurface, coordinates):
return coordinates.loc[incompleteSurface.isna()]
def getMaskMatrix(incompleteSurface):
maskMatrix = incompleteSurface.copy().fillna(True)
maskMatrix.loc[~incompleteSurface.isna()] = False
return maskMatrix
#maskedGrid : surface precising missing value with a NaN
#Assuming indexes and columns are sorted
#Select swaption coordinates (expiry, tenor) whose value is known and are on the boundary
#This defined a polygon whose vertices are known values
def selectPolygonOuterPoints(coordinates):
outerPoints = []
#Group coordinates by first coordinate
splittedCoordinates = {}
for tple in coordinates.values :
if tple[0] not in splittedCoordinates :
splittedCoordinates[tple[0]] = []
splittedCoordinates[tple[0]].append(tple[1])
#Get maximum and minimum for the second dimension
for key in splittedCoordinates.keys():
yMin = np.nanmin(splittedCoordinates[key])
yMax = np.nanmax(splittedCoordinates[key])
outerPoints.append((key,yMin))
outerPoints.append((key,yMax))
return outerPoints
def removeNaNcooridnates(coordinatesList):
isNotNaN = [False if (np.isnan(x[0]) or np.isnan(x[1])) else True for x in coordinatesList]
return coordinatesList[isNotNaN]
#Order a list of vertices to form a polygon
def orderPolygonVertices(outerPointList):
sortedPointList = np.sort(outerPointList) #np sort supports array of tuples
#Points are built as a pair of two points for value in the first dimension
#Hence the polygon starts with points having the first value for the second dimension
#(and order them along the first dimension)
orderedListOfVertices = sortedPointList[::2]
#We then browse the remaining points but in the reverse order for the second dimension
    orderedListOfVertices = np.concatenate((orderedListOfVertices, sortedPointList[1::2][::-1]))
return orderedListOfVertices
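# Sketch of the intended ordering: the first half of the path follows the
# minimum-tenor boundary points in increasing expiry order and the second half the
# maximum-tenor boundary points in decreasing expiry order, so the concatenated
# vertices trace a closed polygon consumable by the winding-number test below.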
#Select swaption coordinates (expiry, tenor) whose value is known and are on the boundary
#This defined a polygon whose vertices are known values
def buildInnerDomainCompletion(incompleteSurface, coordinates):
coordinatesWithValues = coordinates.loc[~incompleteSurface.isna()]
outerPointsList = selectPolygonOuterPoints(coordinatesWithValues)
verticesList = orderPolygonVertices(outerPointsList)
expiryVertices, tenorVectices = zip(*verticesList)
return expiryVertices, tenorVectices
#Select swaption coordinates (expiry, tenor) whose value is known
#and their coordinate corresponds to maximum/minimum value for x axis and y axis
#This defines a quadrilateral
def buildOuterDomainCompletion(incompleteSurface, coordinates):
coordinatesWithValues = coordinates.loc[~incompleteSurface.isna()].values
firstDimValues = list(map(lambda x : x[0], coordinatesWithValues))
secondDimValues = list(map(lambda x : x[1], coordinatesWithValues))
maxExpiry = np.amax(firstDimValues)
minExpiry = np.nanmin(firstDimValues)
maxTenor = np.amax(secondDimValues)
minTenor = np.nanmin(secondDimValues)
expiryVertices = [maxExpiry, maxExpiry, minExpiry, minExpiry, maxExpiry]
tenorVectices = [maxTenor, minTenor, minTenor, maxTenor, maxTenor]
return expiryVertices, tenorVectices
#verticesList : list of vertices defining the polygon
#Points : multiIndex serie for which we want to check the coordinates belongs to the domain defined by the polygon
#Use Winding number algorithm
def areInPolygon(verticesList, points):
return pd.Series(points.map(lambda p : point_in_polygon.wn_PnPoly(p, verticesList) != 0).values,
index = points.index)
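# Minimal containment sketch (toy values, assuming the point_in_polygon helper used
# above is available):
# square = [(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]
# pts = pd.Series([(1.0, 1.0), (3.0, 1.0)], index=['inside', 'outside'])
# areInPolygon(square, pts)   # expected: inside -> True, outside -> False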
#Return the list (pandas Dataframe) of points which are located in the domain (as a closed set)
#The closure ( i.e. edge of the domain ) is also returned
#defined by points which are not masked
def areInInnerPolygon(incompleteSurface, coordinates, showDomain = False):
#Add the frontier
gridPoints = coordinates.loc[~incompleteSurface.isna()]
#Build polygon from the frontier
expiriesPolygon, tenorsPolygon = buildInnerDomainCompletion(incompleteSurface, coordinates)
polygon = list(zip(expiriesPolygon,tenorsPolygon))
#Search among masked points which ones lie inside the polygon
maskedPoints = getMaskedPoints(incompleteSurface, coordinates)
interiorPoints = areInPolygon(polygon, maskedPoints)
if not interiorPoints.empty :
gridPoints = gridPoints.append(maskedPoints[interiorPoints]).drop_duplicates()
if showDomain :
plt.plot(expiriesPolygon,tenorsPolygon)
plt.xlabel("First dimension")
plt.xlabel("Second dimension")
plt.plot(gridPoints.map(lambda x : x[0]).values,
gridPoints.map(lambda x : x[1]).values,
'ro')
plt.show()
return gridPoints
#Return the list (pandas Dataframe) of points which are located in the outer domain (as a closed set)
#Outer domain is delimited by the maximum and minimum coordinates of the known values
#inner domain is delimited by the polygon whose vertices are the known points
#showDomain plots the boundary ( i.e. edge of the domain ) and the points which are inside the quadrilateral
def areInOuterPolygon(incompleteSurface, coordinates, showDomain = False):
#Add the frontier
gridPoints = coordinates.loc[~incompleteSurface.isna()]
#Build polygon from the frontier
expiriesPolygon, tenorsPolygon = buildOuterDomainCompletion(incompleteSurface, coordinates)
polygon = list(zip(expiriesPolygon,tenorsPolygon))
#Search among masked points which ones lie inside the polygon
maskedPoints = getMaskedPoints(incompleteSurface, coordinates)
interiorPoints = areInPolygon(polygon, maskedPoints)
if not interiorPoints.empty :
gridPoints = gridPoints.append(maskedPoints[interiorPoints]).drop_duplicates()
if showDomain :
plt.plot(expiriesPolygon,tenorsPolygon)
plt.xlabel("First dimension")
plt.xlabel("Second dimension")
plt.plot(gridPoints.map(lambda x : x[0]).values,
gridPoints.map(lambda x : x[1]).values,
'ro')
plt.show()
return gridPoints
#######################################################################################################
#Linear interpolation with flat extrapolation
#Assume row are non empty
def interpolateRow(row, coordinates):
definedValues = row.dropna()
if definedValues.size == 1 :
return pd.Series(definedValues.iloc[0] * np.ones_like(row),
index = row.index)
else :
#Flat extrapolation and linear interpolation based on index (Tenor) value
filledRow = row.interpolate(method='index', limit_direction = 'both')
return filledRow
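# Illustrative call (toy values); the index plays the role of the tenor axis, so the
# interior NaN is filled linearly and the leading NaN is filled flat:
# row = pd.Series([np.nan, 1.0, np.nan, 3.0], index=[1.0, 2.0, 3.0, 4.0])
# interpolateRow(row, None)   # -> [1.0, 1.0, 2.0, 3.0]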
def formatCoordinatesAsArray(coordinateList):
x = np.ravel(list(map(lambda x : x[0], coordinateList)))
y = np.ravel(list(map(lambda x : x[1], coordinateList)))
return np.vstack((x, y)).T
#Linear interpolation combined with Nearest neighbor extrapolation
# drawn from https://github.com/mChataign/DupireNN
def customInterpolator(interpolatedData, formerCoordinates, NewCoordinates):
knownPositions = formatCoordinatesAsArray(formerCoordinates)
xNew = np.ravel(list(map(lambda x : x[0], NewCoordinates)))
yNew = np.ravel(list(map(lambda x : x[1], NewCoordinates)))
# print(type(xNew))
# print(type(yNew))
# print(np.array((xNew, yNew)).T.shape)
# print(type(interpolatedData))
# print(type(knownPositions))
# print()
fInterpolation = griddata(knownPositions,
np.ravel(interpolatedData),
np.array((xNew, yNew)).T,
method = 'linear',
rescale=True)
fExtrapolation = griddata(knownPositions,
np.ravel(interpolatedData),
np.array((xNew, yNew)).T,
method = 'nearest',
rescale=True)
return np.where(np.isnan(fInterpolation), fExtrapolation, fInterpolation)
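# Minimal sketch (toy values): a query inside the convex hull of the known nodes is
# served by the linear pass, a query outside by the nearest-neighbour fallback.
# known = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]
# vals = np.array([0.0, 1.0, 2.0])
# customInterpolator(vals, known, [(0.25, 0.25), (2.0, 0.0)])   # -> approx. [0.75, 1.0]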
def interpolate(incompleteSurface, coordinates):
knownValues = incompleteSurface.dropna()
knownLocation = coordinates.loc[knownValues.index]
locationToInterpolate = coordinates.drop(knownValues.index)
interpolatedValues = customInterpolator(knownValues.values,
knownLocation.values,
locationToInterpolate.values)
completeSurface = pd.Series(interpolatedValues,
index = locationToInterpolate.index).append(knownValues)
return completeSurface.loc[incompleteSurface.index].rename(incompleteSurface.name)
def extrapolationFlat(incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface, coordinates)
correctedSurface = interpolate(filteredSurface, filteredCoordinates)
correctedSurface = correctedSurface.append(pd.Series(incompleteSurface.drop(filteredCoordinates.index),
index = coordinates.drop(filteredCoordinates.index).index))
return correctedSurface.sort_index()
#######################################################################################################
class LinearInterpolation(InterpolationModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestLinearInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
#Extrapolation is flat and interpolation is linear
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface, coordinates)
interpolatedSurface = interpolate(filteredSurface, filteredCoordinates)
nanSurface = incompleteSurface.drop(interpolatedSurface.index)
return interpolatedSurface.append(nanSurface)[coordinates.index].rename(incompleteSurface.name)
# #Build the learner
# def buildModel(self):
# raise NotImplementedError()
# return
#######################################################################################################
class SplineInterpolation(LinearInterpolation):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestSplineInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
def customInterpolator(self, interpolatedData, formerCoordinates, NewCoordinates):
knownPositions = formatCoordinatesAsArray(formerCoordinates)
xNew = np.ravel(list(map(lambda x : x[0], NewCoordinates)))
yNew = np.ravel(list(map(lambda x : x[1], NewCoordinates)))
fInterpolation = griddata(knownPositions,
np.ravel(interpolatedData),
(xNew, yNew),
method = 'cubic',
rescale=True)
fExtrapolation = griddata(knownPositions,
np.ravel(interpolatedData),
(xNew, yNew),
method = 'nearest',
rescale=True)
return np.where(np.isnan(fInterpolation), fExtrapolation, fInterpolation)
#Extrapolation is flat and interpolation is linear
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface, coordinates)
knownValues = filteredSurface.dropna()
knownLocation = filteredCoordinates.loc[knownValues.index]
locationToInterpolate = filteredCoordinates.drop(knownValues.index)
interpolatedValues = self.customInterpolator(knownValues.values,
knownLocation.values,
locationToInterpolate.values)
completeSurface = pd.Series(interpolatedValues,
index = locationToInterpolate.index).append(knownValues)
interpolatedSurface = completeSurface.loc[filteredSurface.index].rename(filteredSurface.name)
nanSurface = incompleteSurface.drop(interpolatedSurface.index)
return interpolatedSurface.append(nanSurface)[coordinates.index].rename(incompleteSurface.name)
#######################################################################################################
class GaussianProcess(InterpolationModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestGaussianModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
self.TrainGaussianHyperparameters = (self.hyperParameters["Train Interpolation"]
if ("Train Interpolation" in self.hyperParameters) else False)
self.sigmaF = self.hyperParameters["sigmaF"] if ("sigmaF" in self.hyperParameters) else 50.0
self.bandwidth = self.hyperParameters["bandwidth"] if ("bandwidth" in self.hyperParameters) else 0.5
self.sigmaBounds = self.hyperParameters["sigmaBounds"] if ("sigmaBounds" in self.hyperParameters) else (1.0, 200.0)
self.bandwidthBounds = self.hyperParameters["bandwidthBounds"] if ("bandwidthBounds" in self.hyperParameters) else (0.01, 10.0)
self.kernel = (ConstantKernel(constant_value=self.sigmaF,
constant_value_bounds=self.sigmaBounds)
* RBF(length_scale=self.bandwidth,
length_scale_bounds=self.bandwidthBounds))
def kernelRBF(self, X1, X2, sigma_f=1.0, l=1.0):
'''
Isotropic squared exponential kernel. Computes
a covariance matrix from points in X1 and X2.
Args:
X1: Array of m points (m x d).
X2: Array of n points (n x d).
Returns:
Covariance matrix (m x n).
'''
#print("sigma_f : ",sigma_f," l : ",l)
sqdist = np.sum(X1**2, 1).reshape(-1, 1) + np.sum(X2**2, 1) - 2 * np.dot(X1, X2.T)
return sigma_f**2 * np.exp(-0.5 / l**2 * sqdist)
def predictGaussianModel(self, X, XStar, Y, sigma_f, l):
KStar = self.kernelRBF(X, XStar, sigma_f, l)
KStarT = KStar.T
K = self.kernelRBF(X, X, sigma_f, l)
#Add noise to avoid singular matrix problem
noise = (1e-9) * np.eye(K.shape[0])
KInv = np.linalg.inv(K + noise)
KStarStar = self.kernelRBF(XStar, XStar, sigma_f, l)
YStar = np.dot(np.dot(KStarT,KInv),Y)
YStarUncertainty = KStarStar - np.dot(np.dot(KStarT,KInv),KStar)
return YStar, YStarUncertainty
def predictGaussianModelFormatted(self, knownValues, locationToInterpolate, coordinates):
knownLocation = coordinates.loc[knownValues.index]
#Optimize on log parameters
interpolatedValues, _ = self.predictGaussianModel(formatCoordinatesAsArray(knownLocation.values),
formatCoordinatesAsArray(locationToInterpolate.values),
knownValues.values,
np.exp(self.kernel.theta[0]),
np.exp(self.kernel.theta[1]))
return pd.Series(interpolatedValues, index = locationToInterpolate.index)
#Interpolate or extrapolate certain values given the knowledge of other ones
def interpolate(self, incompleteSurface, coordinates):
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(incompleteSurface,
coordinates)
nanSurface = incompleteSurface.drop(filteredSurface.index)
extrapolationMode = self.hyperParameters["extrapolationMode"] if "extrapolationMode" in self.hyperParameters else None
#NoExtrapolation : NoExtrapolation | InnerDomain | OuterDomain
#LocationToInterpolate : Index of missing values
#knownValues : Serie of values which are known
knownValues = filteredSurface.dropna()
if knownValues.size == filteredSurface.size : #No value to interpolate
return incompleteSurface
resSurface = filteredSurface.copy()
interpolatedPoint = None
if extrapolationMode == 'InnerDomain' :
interpolatedPoint = areInInnerPolygon(filteredSurface, filteredCoordinates)
elif extrapolationMode == 'OuterDomain' :
interpolatedPoint = areInOuterPolygon(filteredSurface, filteredCoordinates)
else : #NoExtrapolation
interpolatedPoint = filteredCoordinates.drop(knownValues.index)
if self.TrainGaussianHyperparameters :
interpolatedValues = self.predictGaussianModelFormatted(knownValues,
interpolatedPoint,
filteredCoordinates)
else :
knownLocation = filteredCoordinates.loc[knownValues.index]
interpolator = GaussianProcessRegressor(kernel=self.kernel,
random_state=0,
normalize_y=True).fit(formatCoordinatesAsArray(knownLocation.values),
knownValues.values)
interpolatedValues = pd.Series(interpolator.predict(formatCoordinatesAsArray(interpolatedPoint.values), return_std=False),
index = interpolatedPoint.index)
resSurface.loc[interpolatedValues.index] = interpolatedValues
return extrapolationFlat(resSurface.append(nanSurface)[incompleteSurface.index].rename(incompleteSurface.name),
coordinates)
def nll_fn(self, X_trainSerie, Y_trainSerie, theta, noise=1e-3):
'''
Computes the negative log marginal
likelihood for training data X_train and Y_train and given
noise level.
Args:
X_train: training locations (m x d).
Y_train: training targets (m x 1).
noise: known noise level of Y_train.
theta: gaussian hyperparameters [sigma_f, l]
'''
filteredSurface, filteredCoordinates = loadData.removePointsWithInvalidCoordinates(Y_trainSerie,
X_trainSerie)
        Y_train = filteredSurface.dropna().values
X_train = formatCoordinatesAsArray(filteredCoordinates.loc[filteredSurface.dropna().index].values)
# Numerically more stable implementation of Eq. (7) as described
# in http://www.gaussianprocess.org/gpml/chapters/RW2.pdf, Section
# 2.2, Algorithm 2.1.
K = (self.kernelRBF(X_train, X_train, sigma_f=theta[0], l=theta[1]) +
noise**2 * np.eye(len(X_train)))
L = np.linalg.cholesky(K)
return (np.sum(np.log(np.diagonal(L))) +
0.5 * Y_train.T.dot(np.linalg.lstsq(L.T, np.linalg.lstsq(L, Y_train)[0])[0]) +
0.5 * len(X_train) * np.log(2*np.pi))
#Apply nll_fn for each day of YSerie and sum results
def computeTrainHistoryLogLikelyhood(self, kernelParams, dataSetList):
error = 0
locations = dataSetList[1] #YSerie.iloc[0].index.to_frame().values
func = lambda x : self.nll_fn(locations.loc[x.name], x, np.exp(kernelParams))
marginalLogLikelyhood = dataSetList[0].apply(func, axis = 1)
return marginalLogLikelyhood.sum()
def train(self, inputTrain, nbEpoch, inputTest = None):
if self.TrainGaussianHyperparameters :
#Calibrate globally gaussian process hyperparameters l and sigma on the training set
objectiveFuntion = lambda x : self.computeTrainHistoryLogLikelyhood(x,inputTrain)
nbRestart = 5#15
bestValue = None
bestParam = None
#As loglikelyhood function is nonconvex we try l-bfgs algorithms several times
def randomStart(bounds, nbStart):
return np.random.uniform(low=bounds[0], high=bounds[1], size=nbStart)
optimStarts = np.apply_along_axis(lambda x : randomStart(x,nbRestart),
1,
self.kernel.bounds).T
start = time.time()
for i in range(nbRestart):
print("bounds", np.exp(self.kernel.bounds))
print("random Starts", np.exp(optimStarts[i]))
resOptim = scipy.optimize.fmin_l_bfgs_b(objectiveFuntion,
optimStarts[i],
approx_grad = True,
maxiter = 20,
bounds = self.kernel.bounds)
if self.verbose :
print(resOptim)
if (bestParam is None) or (resOptim[1] < bestValue) :
bestValue = resOptim[1]
bestParam = resOptim[0]
print("Attempt : ", i, " nnLogLikelyHood : ", bestValue, " bestParam : ", np.exp(bestParam))
optimalValues = {'k1__constant_value' : np.exp(bestParam)[0],
'k2__length_scale' : np.exp(bestParam)[1]}
self.kernel.set_params(**optimalValues)
print("Time spent during optimization : ", time.time() - start)
#Else
return super().train(inputTrain, nbEpoch, inputTest = None)
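# Minimal, self-contained sketch (not part of the original module) of the zero-mean GP
# posterior that predictGaussianModel implements: mean = K_*^T (K + eps*I)^{-1} y and
# cov = K_** - K_*^T (K + eps*I)^{-1} K_*. The toy inputs below are assumptions for
# illustration only, and np.linalg.solve is used instead of an explicit matrix inverse.
def _demo_gp_posterior():
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0]])   # known locations
    y = np.array([0.0, 1.0, 0.5])         # known values
    Xs = np.array([[0.5], [1.5]])         # locations to predict
    sigma_f, l = 1.0, 1.0
    def k(A, B):
        sq = np.sum(A**2, 1).reshape(-1, 1) + np.sum(B**2, 1) - 2 * A @ B.T
        return sigma_f**2 * np.exp(-0.5 / l**2 * sq)
    K = k(X, X) + 1e-9 * np.eye(len(X))          # jitter avoids a singular matrix
    mean = k(X, Xs).T @ np.linalg.solve(K, y)    # posterior mean at Xs
    cov = k(Xs, Xs) - k(X, Xs).T @ np.linalg.solve(K, k(X, Xs))
    return mean, cov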
def getTTMFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[0])
def getMoneynessFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[1])
#######################################################################################################
class NelsonSiegelCalibrator:
#######################################################################################################
#Construction functions
#######################################################################################################
def __init__(self,
order,
hyperparameters):
self.hyperParameters = hyperparameters
self.order = order
self.beta = []
self.alpha = []
self.verbose = False
def objectiveFunction(self, ttm, beta, alpha):
slopeTime = (1 - np.exp(-alpha[0] * ttm))/(alpha[0] * ttm)
nelsonSiegel = beta[0] + slopeTime * beta[1] + (slopeTime - np.exp(-alpha[0] * ttm)) * beta[2]
if self.order == 4 :
nelsonSiegelSvensson = nelsonSiegel + ((1 - np.exp(-alpha[1] * ttm))/(alpha[1] * ttm) - np.exp(-alpha[1] * ttm)) * beta[3]
return nelsonSiegelSvensson
return nelsonSiegel
def drawnStartingPoints(self, bounds):
randPos = np.random.rand(len(bounds))
return [x[0][0] + (x[0][1] - x[0][0]) * x[1] for x in zip(bounds, randPos)]
def calibrate(self, curvesVol, ttms):
if self.order == 4 :
#Bounds taken from "Calibrating the Nelson–Siegel–Svensson model", <NAME>, <NAME>, <NAME>
#See https://comisef.eu/files/wps031.pdf
bounds = [(-10000,10000), (-10000,10000), (-10000,10000), (-10000,10000), (0,100), (0,200)]
startBounds = [(-1,1), (-1,1), (-1,1), (-1,1), (0,30), (0,30)]
func = lambda x : np.sqrt(np.nanmean(np.square(self.objectiveFunction(ttms/250, x[:4], x[4:]) - curvesVol)))
else :
bounds = [(-10000,10000), (-10000,10000), (-10000,10000), (0,200)]
startBounds = [(-1,1), (-1,1), (-1,1), (0,30)]
func = lambda x : np.sqrt(np.nanmean(np.square(self.objectiveFunction(ttms/250, x[:3], x[3:]) - curvesVol)))
bestFit = None
nbInit = 10
for k in range(nbInit) :
startingPoints = self.drawnStartingPoints(startBounds)
resOptim = scipy.optimize.minimize(func, startingPoints, bounds=bounds, method='L-BFGS-B')
if bestFit is None or resOptim.fun < bestFit :
bestFit = resOptim.fun
self.beta = resOptim.x[:4] if self.order == 4 else resOptim.x[:3]
self.alpha = resOptim.x[4:] if self.order == 4 else resOptim.x[3:]
if self.verbose :
print(resOptim.fun, " ; ", bestFit)
if self.verbose :
print("best error : ", bestFit)
return
def interpolate(self, ttm):
return self.objectiveFunction(ttm/250, self.beta, self.alpha)
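# Hedged usage sketch of NelsonSiegelCalibrator (illustrative only; the maturities and vol
# levels below are made-up numbers, not data from this project): calibrate a 3-factor
# Nelson-Siegel curve to a small vol term structure and interpolate it at new maturities.
def _demo_nelson_siegel():
    import numpy as np
    ttms = np.array([21.0, 63.0, 126.0, 252.0, 504.0])   # business days to maturity
    vols = np.array([0.25, 0.22, 0.21, 0.20, 0.195])      # assumed implied vols
    calibrator = NelsonSiegelCalibrator(order=3, hyperparameters={})
    calibrator.calibrate(vols, ttms)
    return calibrator.interpolate(np.array([30.0, 90.0, 360.0]))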
#Post-treatments for calibrateModelMoneynessWiseDaily
def mergeResults(xCoordinates,
xAvgCoordinates,
xVol,
interpList,
refList,
nelsonList,
dfList):
interpVolDf = pd.concat(interpList,axis=1)
refVolDf = pd.concat(refList,axis=1)
moneynesses = np.unique(getMoneynessFromCoordinates(dfList))
nelsonIndex = pd.MultiIndex.from_product( [moneynesses, nelsonList[0].columns],
names=["Moneyness", "Nelson-Siegel Parameters"])
nelsonDf = pd.DataFrame(pd.concat(nelsonList,axis=1).values,
index = nelsonList[0].index,
columns = nelsonIndex)
coordinatesIndex = pd.MultiIndex.from_product([moneynesses, xCoordinates[0].columns],
names=["Moneyness", "Rank"])
    coordinatesDf = pd.DataFrame(pd.concat(xCoordinates, axis=1).values,
                                 index = xCoordinates[0].index,
                                 columns = coordinatesIndex)
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.integrate
import growth.model
import growth.integrate
import growth.viz
import seaborn as sns
import tqdm
const = growth.model.load_constants()
colors, palette = growth.viz.matplotlib_style()
mapper = growth.viz.load_markercolors()
#%%
# Load the Dai data
ribo_chlor_data = pd.read_csv('../data/main_figure_data/Fig5A_Dai2016_chloramphenicol_ribosome_content.csv')
elong_chlor_data = pd.read_csv('../data/main_figure_data/Fig5A_Dai2016_chloramphenicol_elongation_rates.csv')
# Load the comparison data
# Define constant parameters
gamma_max = const['gamma_max']
Kd_TAA = const['Kd_TAA']
Kd_TAA_star = const['Kd_TAA_star']
tau = const['tau']
kappa_max = const['kappa_max']
phi_O = const['phi_O']
# Define parameter ranges for non-chlor case
nu_range = np.linspace(0.1, 20, 200)
# Compute the non-chlor case
nochlor_df = pd.DataFrame([])
for i, nu in enumerate(tqdm.tqdm(nu_range)):
# Define the arguments
args = {'gamma_max':gamma_max,
'nu_max': nu,
'tau': tau,
'Kd_TAA': Kd_TAA,
'Kd_TAA_star': Kd_TAA_star,
'kappa_max': kappa_max,
'phi_O': phi_O}
# Equilibrate the model and print out progress
out = growth.integrate.equilibrate_FPM(args, tol=2, max_iter=10)
# Assemble the dataframe
ratio = out[-1] / out[-2]
phiRb = (1 - phi_O) * (ratio / (ratio + tau))
gamma = gamma_max * (out[-1] / (out[-1] + Kd_TAA_star))
nochlor_df = nochlor_df.append({'MRb_M': out[1]/out[0],
'phiRb': phiRb,
'gamma': gamma,
'v_tl': gamma * 7459 / 3600,
'nu': nu,
'lam': gamma * phiRb},
ignore_index=True)
#%%
# Estimate the best nu for each growth medium
nu_mapper = {}
for g, d in ribo_chlor_data[ribo_chlor_data['chlor_conc_uM']==0].groupby(['medium']):
phiRb = d['mass_fraction'].values[0]
lam = d['growth_rate_hr'].values[0]
estimated_nu = growth.integrate.estimate_nu_FPM(phiRb, lam, const, phi_O,
verbose=True, nu_buffer=1,
tol=2)
nu_mapper[g] = estimated_nu
#%%
# Using the estimated nus, perform the integration
chlor_range = np.linspace(0, 12.5, 10) * 1E-6
# Compute the non-chlor case
chlor_df = pd.DataFrame([])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 6 11:26:20 2021
@author: clement
"""
# =============================================================================
## Library dependancies
# =============================================================================
import pandas as pd
import os
import seaborn as sns
from pathlib import Path
from os.path import isfile, isdir, join
from utilitaries import dfListConnex
# =============================================================================
## Basic functions
# =============================================================================
def children(data_list, max_step = 6):
"""
Return a dataframe containing the children of each node for all steps
in max_step.
Parameters:
-----------
data_list: list of pandas dataframe
Each element in data_list must contain at least three columns named
members, weight and small_junction.
    max_step: integer, default 6.
        Number of time steps over which the children search will be done.
Returns:
--------
df: pandas dataframe
A new dataframe with the weight, the step, the children and children
number for each cluster and each time step.
"""
maxStep = min(len(data_list), max_step)
if maxStep < max_step:
print("Not enough data to reach {} steps. Aborting operation.".format(max_step))
n = len(data_list[0])
L_len = []
A = []
for i in range(n):
# Create a list of all the members in the junction
L = data_list[0].iloc[i]['members']
L_len += [data_list[0].iloc[i]['mass']]*maxStep
for step in range(1,1+maxStep):
M = []
# Check if the members of the cluster in the dataframe corresponding to step are in L
try:
#print('length: {}'.format(len(data_list[step])))
for k in range(len(data_list[step])):
for elem in data_list[step].at[k,'members']:
if elem in L:
M += [(k, data_list[step].at[k,'small_junction'])]
except IndexError:
print(k)
# Eliminate the repetition in M
N = sorted(list(set(M)))
P = []
# Count the occurences of each children
for elem in N:
P += [(elem[0], elem[1], M.count(elem))]
A += [[step, P, len(P)]]
df = pd.DataFrame(A, columns=['step', 'children', 'children_number'])
df['mass'] = L_len
return df
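# Minimal illustration of children() on hypothetical toy snapshots (these frames are
# assumptions, not data from the original pipeline): the cluster {1, 2, 3, 4} at step 0
# splits into {1, 2} and {3, 4} one step later, so it has two children.
def _demo_children():
    step0 = pd.DataFrame({'members': [[1, 2, 3, 4]], 'mass': [4], 'small_junction': [False]})
    step1 = pd.DataFrame({'members': [[1, 2], [3, 4]], 'mass': [2, 2], 'small_junction': [False, True]})
    return children([step0, step1], max_step=1)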
# def childrenAlternative(data_list, max_step = 6):
# """
# Return a dataframe containing the children of each node for all steps
# in max_step.
# Parameters:
# -----------
# data_list: list of pandas dataframe
# Each element in data_list must contain at least three columns named
# members, weight and small_junction.
# max_steps: interger, default 6.
# Number of time steps the children search will de done.
# Returns:
# --------
# df: pandas dataframe
# A new dataframe with the weight, the step, the children and children
# number for each cluster and each time step.
# """
# maxStep = min(len(data_list), max_step)
# if maxStep < max_step:
# print("Not enough data to reach {} steps. Aborting operation.".format(max_step))
# n = len(data_list[0])
# L_len = []
# A = []
# for i in range(n):
# # Create a list of all the members in the junction
# L = data_list[0].iloc[i]['members']
# L_len += [data_list[0].iloc[i]['mass']]*maxStep
# for step in range(1,1+maxStep):
# M = []
# # Check if the members of the cluster in the dataframe corresponding to step are in L
# for elem in L:
# k = data_list[step].loc[elem in data_list[step]['members']]
# # for k in range(len(data_list[step])):
# # for elem in data_list[step].at[k,'members']:
# # if elem in L:
# # M += [(k, data_list[step].at[k,'small_junction'])]
# # Eliminate the repetition in M
# N = sorted(list(set(M)))
# P = []
# # Count the occurences of each children
# for elem in N:
# P += [(elem[0], elem[1], M.count(elem))]
# A += [[step, P, len(P)]]
# df = pd.DataFrame(A, columns=['step', 'children', 'children_number'])
# df['mass'] = L_len
# return df
def fluidity(data_list, max_offset, max_step):
"""
Return a dataframe containing the children of each node for all steps
in max_step and for all starting data in max_offset.
Parameters:
-----------
data_list: list of pandas dataframe
Each element in data_list must contain at least three columns named
members, weight and small_junction.
    max_offset: integer
        Number of starting snapshots in data_list from which the children
        search is launched.
    max_step: integer
        Number of time steps over which the children search will be done.
Returns:
--------
df: pandas dataframe
A new dataframe with the weight, the step, the children, children
number and fluidity for each cluster and each time step.
"""
dfluidity = []
for offset in range(max_offset):
data = children(data_list[offset:], max_step)
# Add the starting time to teh dataframe
data['time'] = [int(50000*(offset+1)) for j in range(len(data))]
K = []
# Add the fluidity measure to each junction
for i in range(len(data)):
K += [(data.at[i,'children_number'] - 1)/ data.at[i,'mass']]
data['fluidity'] = K
dfluidity += [data]
    df = pd.concat(dfluidity, ignore_index=True)
    return df
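# Companion sketch for fluidity() on the same kind of hypothetical snapshots (toy data,
# illustration only): three consecutive snapshots, with the children search run over two
# steps starting from the first snapshot.
def _demo_fluidity():
    snapshots = [pd.DataFrame({'members': [[1, 2, 3, 4]], 'mass': [4], 'small_junction': [False]}),
                 pd.DataFrame({'members': [[1, 2], [3, 4]], 'mass': [2, 2], 'small_junction': [False, True]}),
                 pd.DataFrame({'members': [[1], [2], [3, 4]], 'mass': [1, 1, 2], 'small_junction': [True, True, True]})]
    return fluidity(snapshots, max_offset=1, max_step=2)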
import os
import glob
from datetime import datetime, timedelta
import json
import time
import pandas as pd
import numpy as np
import pickle
import lightgbm as lgbm
from google.cloud import bigquery
def load_strava_activity_data_from_bq(users=["TyAndrews"]):
start_time = time.time()
raw_files_dict = {}
for user in users:
print(f"{user} Data Found")
bqclient = bigquery.Client()
strava_data_query = """
SELECT
distance_km,
type,
start_date_local AS start_time,
distance_km AS distance_raw_km,
elapsed_time_hrs AS elapsed_time_raw_hrs,
moving_time_hrs AS moving_time_raw_hrs,
total_elevation_gain AS elevation_gain
FROM `{0}.prod_dashboard.raw_strava_data` LIMIT 5000""".format(
bqclient.project
)
raw_files_dict[user] = bqclient.query(strava_data_query).result().to_dataframe()
print(
f"load_strava_activity_data: Took {time.time() - start_time: .2f}s to get BQ data"
)
return raw_files_dict
def preprocess_strava_df(raw_df, min_act_length=1200, max_act_dist=400, export=False):
    # Keep activities longer than min_act_length seconds (default 20 min) and shorter than max_act_dist km
processed_df = raw_df[
(raw_df.elapsed_time_raw > min_act_length) & (raw_df.distance < max_act_dist)
]
print(
f"\t{len(raw_df[(raw_df.elapsed_time_raw < min_act_length) & (raw_df.distance < max_act_dist)])} Activities Under 20min in Length, Removed from Dataset"
)
processed_df = processed_df.convert_dtypes()
processed_df[["distance", "distance_raw"]] = processed_df[
["distance", "distance_raw"]
].apply(pd.to_numeric)
processed_df[["start_date_local"]] = pd.to_datetime(
processed_df["start_date_local_raw"], unit="s"
) # .apply(pd.to_datetime(unit='s'))
processed_df["exer_start_time"] = pd.to_datetime(
processed_df["start_date_local"].dt.strftime("1990:01:01:%H:%M:%S"),
format="1990:01:01:%H:%M:%S",
)
# processed_df['exer_start_time'] = pd.to_datetime(pd.to_datetime(processed_df['start_time']).dt.strftime('1990:01:01:%H:%M:%S'), format='1990:01:01:%H:%M:%S')
processed_df["exer_start_time"] = (
processed_df["exer_start_time"]
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/London")
)
processed_df["act_type_perc_time"] = processed_df["moving_time_raw"] / sum(
processed_df["moving_time_raw"]
)
processed_df["elapsed_time_raw_hrs"] = processed_df["elapsed_time_raw"] / 3600
processed_df["moving_time_raw_hrs"] = processed_df["moving_time_raw"] / 3600
processed_df["distance_raw_km"] = processed_df["distance_raw"] / 1000
if export == True:
processed_df.to_csv(r"data\processed\ProcessedStravaData.csv")
return processed_df
def load_employment_model_data():
model_data_file_path = os.path.abspath(
os.path.join(os.getcwd(), "data", "processed")
)
print("Loading Employment Model Data: " + model_data_file_path)
start = time.process_time()
train_data = os.path.join(model_data_file_path, "train_employment_data.csv")
test_data = os.path.join(model_data_file_path, "test_employment_data.csv")
files = [train_data, test_data]
all_data = []
for f in files:
data = pd.read_csv(f)
all_data.append(data)
return all_data[0], all_data[1]
def preprocess_employment_model_data(input_data, work_hours):
data = input_data.iloc[:, 0:24]
labels = input_data["label"]
morning = work_hours[0]
afternoon = work_hours[1]
data["morn"] = data.iloc[:, morning[0] - 1 : morning[1]].sum(axis=1)
data["aft"] = data.iloc[:, afternoon[0] - 1 : afternoon[1]].sum(axis=1)
return data, labels
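# Hedged usage sketch: the 24 hourly-fraction columns, the 'label' column, and the
# 9-12 / 13-17 work-hour windows below are assumptions chosen only to illustrate the
# expected input layout for preprocess_employment_model_data.
def _demo_preprocess_employment():
    hourly = pd.DataFrame(np.random.rand(5, 24), columns=range(1, 25))
    hourly["label"] = [0, 1, 0, 1, 1]
    data, labels = preprocess_employment_model_data(hourly, work_hours=[(9, 12), (13, 17)])
    return data[["morn", "aft"]], labels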
def load_week_start_times_data():
print("Loading Weekly Employment Summary Data: ")
week_data = os.path.join(
os.getcwd(), "data", "processed", "yearly_week_start_times.json"
)
yearly_week_summary_data = json.load(open(week_data, "r"))
return yearly_week_summary_data
def load_lgbm_model(model_file_name="lgbm_employment_classifier.txt"):
lgbm_model = lgbm.Booster(
model_file=os.path.join(os.getcwd(), "models", model_file_name)
)
return lgbm_model
def load_logreg_model(model_file_name="logreg_employment_model.pkl"):
logreg_model = pickle.load(
open(os.path.join(os.getcwd(), "models", model_file_name), "rb")
)
return logreg_model
def load_logreg_model_results(data_set):
print("Loading " + data_set + " LogReg Model Results: ")
if data_set == "test":
logreg_results = pd.read_csv(
glob.glob(
os.path.join(os.getcwd(), "data", "processed", "test_logreg_model*.csv")
)[0]
)
elif data_set == "train":
logreg_results = pd.read_csv(
glob.glob(
os.path.join(
os.getcwd(), "data", "processed", "train_logreg_model*.csv"
)
)[0]
)
return logreg_results
def load_lgbm_model_results(data_set):
print("Loading " + data_set + " LogReg Model Results: ")
if data_set == "test":
lgbm_results = pd.read_csv(
glob.glob(
os.path.join(os.getcwd(), "data", "processed", "test_lgbm_model*.csv")
)[0]
)
elif data_set == "train":
lgbm_results = pd.read_csv(
glob.glob(
os.path.join(os.getcwd(), "data", "processed", "train_lgbm_model*.csv")
)[0]
)
return lgbm_results
def load_lgbm_heatmap(data_set):
lgbm_heatmap = pd.read_csv(
glob.glob(
os.path.join(
os.getcwd(), "data", "processed", data_set + "_lgbm_model_heatmap*.csv"
)
)[0]
)
return lgbm_heatmap
def load_logreg_heatmap(data_set):
logreg_heatmap = pd.read_csv(
glob.glob(
os.path.join(
os.getcwd(),
"data",
"processed",
data_set + "_logreg_model_heatmap*.csv",
)
)[0]
)
return logreg_heatmap
def generate_employment_prediction_model_data(
activity_df, start_year, end_year, start_month, end_month, label
):
summary_data = {}
train_data = []
for year in range(start_year, end_year + 1):
summary_data[str(year)] = {}
begin_month = 1
stop_month = 12 + 1 # Add one to account for indexing up to but not including
if year == start_year:
begin_month = start_month
if year == end_year:
stop_month = (
end_month + 1
) # Add one to account for indexing up to but not including
print(f"{year} {begin_month} {stop_month}")
for month in range(begin_month, stop_month):
summary_data[str(year)][str(month)] = {}
print(f"\t{month}")
# for month in range(quarter*3, quarter*3+3):
# print(f'\t\t{month}')
for day in range(0, 7):
# print(f'\tProcessing Day {day}')
summary_data[str(year)][str(month)][str(day)] = 24 * [0]
# days_data = quarter_data[pd.DatetimeIndex(quarter_data.start_date_local).weekday == day]
for hour in range(0, 24):
# print(f'\t\tAccumulating Hour {hour}')
# hours_data = days_data.set_index('start_date_local')[pd.DatetimeIndex(days_data.start_date_local).hour == hour]
hours_data = activity_df[
(pd.DatetimeIndex(activity_df.start_date_local).year == year)
                        & (pd.DatetimeIndex(activity_df.start_date_local).month == month)
                        & (pd.DatetimeIndex(activity_df.start_date_local).weekday == day)
                        & (pd.DatetimeIndex(activity_df.start_date_local).hour == hour)
                    ]
# Inspirations from : https://towardsdatascience.com/keyword-extraction-with-bert-724efca412ea
import pandas as pd
import numpy as np
from tqdm import tqdm
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Data
training_set = pd.read_json('processed_data/train_set.json')
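# Hedged sketch of the KeyBERT-style extraction these imports point to (the model name,
# the 'extract_keywords' helper, and the idea of ranking candidate n-grams by cosine
# similarity to the document embedding are assumptions based on the linked article,
# not code confirmed by this repo). Older scikit-learn uses get_feature_names() instead
# of get_feature_names_out().
def extract_keywords(doc, top_n=5, model_name='all-MiniLM-L6-v2'):
    candidates = CountVectorizer(ngram_range=(1, 2), stop_words='english').fit([doc]).get_feature_names_out()
    model = SentenceTransformer(model_name)
    doc_embedding = model.encode([doc])
    candidate_embeddings = model.encode(list(candidates))
    scores = cosine_similarity(doc_embedding, candidate_embeddings).flatten()
    return [candidates[i] for i in scores.argsort()[::-1][:top_n]]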
import numpy as np
import pandas as pd
import gc
import warnings
warnings.filterwarnings('ignore')
np.random.seed(123)
class Config():
def __init__(self, load=True):
"""Load the train and test sets with some basic EDA"""
# self.train_filename = train_filename
# self.test_filename = test_filename
def load_data(self, train_filename, test_filename, print_EDA=False):
self.filename_test = train_filename
self.filename_train = test_filename
# Read data
# train_cols = ['id', 'vendor_id', 'pickup_datetime', 'dropoff_datetime', 'passenger_count', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'store_and_fwd_flag', 'trip_duration']
# test_cols = ['id', 'vendor_id', 'pickup_datetime', 'passenger_count', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'store_and_fwd_flag']
train = pd.read_csv(train_filename, header=0) #names=train_cols,
test = pd.read_csv(test_filename, header=0) #names=test_cols,
if print_EDA :
print("===================== LETS DO SOME EDA =====================")
# Do some data stats
print('We have {} training rows and {} test rows.'.format(train.shape[0], test.shape[0]))
print('We have {} training columns and {} test columns.'.format(train.shape[1], test.shape[1]))
print(train.head(2))
print("============================================================")
print(test.head(2))
# Check for NaNs
if train.count().min() == train.shape[0] and test.count().min() == test.shape[0]:
print('We do not need to worry about missing values.')
else:
print('oops')
print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique()))))
print('The vendor_id has {}/{} distincit train/test values {}.'.format(str(len(set(train.vendor_id))) , str(len(set(test.vendor_id))), str(set(train.vendor_id.unique()) | set(test.vendor_id.unique()))))
gc.collect()
train.dropna(inplace=True)
test.dropna(inplace=True)
## Convert dates to datetime features
train['pickup_datetime'] = pd.to_datetime(train.pickup_datetime)
        train['dropoff_datetime'] = pd.to_datetime(train.dropoff_datetime)
# pylint: disable=redefined-outer-name
import itertools
import time
import pytest
import math
import flask
import pandas as pd
import numpy as np
import json
import psutil # noqa # pylint: disable=unused-import
from bentoml.utils.dataframe_util import _csv_split, _guess_orient
from bentoml.adapters import DataframeInput
from bentoml.adapters.dataframe_input import (
check_dataframe_column_contains,
read_dataframes_from_json_n_csv,
)
from bentoml.exceptions import BadInput
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def test_dataframe_request_schema():
input_adapter = DataframeInput(
input_dtypes={"col1": "int", "col2": "float", "col3": "string"}
)
schema = input_adapter.request_schema["application/json"]["schema"]
assert "object" == schema["type"]
assert 3 == len(schema["properties"])
assert "array" == schema["properties"]["col1"]["type"]
assert "integer" == schema["properties"]["col1"]["items"]["type"]
assert "number" == schema["properties"]["col2"]["items"]["type"]
assert "string" == schema["properties"]["col3"]["items"]["type"]
def test_dataframe_handle_cli(capsys, tmpdir):
def test_func(df):
return df["name"][0]
input_adapter = DataframeInput()
json_file = tmpdir.join("test.json")
with open(str(json_file), "w") as f:
f.write('[{"name": "john","game": "mario","city": "sf"}]')
test_args = ["--input={}".format(json_file)]
input_adapter.handle_cli(test_args, test_func)
out, _ = capsys.readouterr()
assert out.strip().endswith("john")
def test_dataframe_handle_aws_lambda_event():
test_content = '[{"name": "john","game": "mario","city": "sf"}]'
def test_func(df):
return df["name"][0]
input_adapter = DataframeInput()
event = {
"headers": {"Content-Type": "application/json"},
"body": test_content,
}
response = input_adapter.handle_aws_lambda_event(event, test_func)
assert response["statusCode"] == 200
assert response["body"] == '"john"'
input_adapter = DataframeInput()
event_without_content_type_header = {
"headers": {},
"body": test_content,
}
response = input_adapter.handle_aws_lambda_event(
event_without_content_type_header, test_func
)
assert response["statusCode"] == 200
assert response["body"] == '"john"'
with pytest.raises(BadInput):
event_with_bad_input = {
"headers": {},
"body": "bad_input_content",
}
input_adapter.handle_aws_lambda_event(event_with_bad_input, test_func)
def test_check_dataframe_column_contains():
df = pd.DataFrame(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=["a", "b", "c"]
)
# this should pass
check_dataframe_column_contains({"a": "int", "b": "int", "c": "int"}, df)
check_dataframe_column_contains({"a": "int"}, df)
check_dataframe_column_contains({"a": "int", "c": "int"}, df)
# this should raise exception
with pytest.raises(BadInput) as e:
check_dataframe_column_contains({"required_column_x": "int"}, df)
assert "Missing columns: required_column_x" in str(e.value)
with pytest.raises(BadInput) as e:
check_dataframe_column_contains(
{"a": "int", "b": "int", "d": "int", "e": "int"}, df
)
assert "Missing columns:" in str(e.value)
assert "required_column:" in str(e.value)
def test_dataframe_handle_request_csv():
def test_function(df):
return df["name"][0]
input_adapter = DataframeInput()
csv_data = 'name,game,city\njohn,mario,sf'
request = MagicMock(spec=flask.Request)
request.headers = (('orient', 'records'),)
request.content_type = 'text/csv'
request.get_data.return_value = csv_data
result = input_adapter.handle_request(request, test_function)
assert result.get_data().decode('utf-8') == '"john"'
def assert_df_equal(left: pd.DataFrame, right: pd.DataFrame):
'''
Compare two instances of pandas.DataFrame ignoring index and columns
'''
try:
left_array = left.values
right_array = right.values
        if np.issubdtype(right_array.dtype, np.floating):
np.testing.assert_array_almost_equal(left_array, right_array)
else:
np.testing.assert_array_equal(left_array, right_array)
except AssertionError:
raise AssertionError(
f"\n{left.to_string()}\n is not equal to \n{right.to_string()}\n"
)
DF_CASES = (
pd.DataFrame(np.random.rand(1, 3)),
pd.DataFrame(np.random.rand(2, 3)),
pd.DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C']),
pd.DataFrame(["str1", "str2", "str3"]), # single dim sting array
pd.DataFrame([np.nan]), # special values
pd.DataFrame([math.nan]), # special values
pd.DataFrame([" ", 'a"b', "a,b", "a\nb"]), # special values
pd.DataFrame({"test": [" ", 'a"b', "a,b", "a\nb"]}), # special values
# pd.Series(np.random.rand(2)), # TODO: Series support
# pd.DataFrame([""]), # TODO: -> NaN
)
@pytest.fixture(params=DF_CASES)
def df(request):
return request.param
@pytest.fixture(params=pytest.DF_ORIENTS)
def orient(request):
return request.param
def test_batch_read_dataframes_from_mixed_json_n_csv(df):
test_datas = []
test_types = []
# test content_type=application/json with various orients
for orient in pytest.DF_ORIENTS:
try:
assert_df_equal(df, pd.read_json(df.to_json(orient=orient)))
except (AssertionError, ValueError):
# skip cases not supported by official pandas
continue
test_datas.extend([df.to_json(orient=orient).encode()] * 3)
test_types.extend(['application/json'] * 3)
df_merged, slices = read_dataframes_from_json_n_csv(
test_datas, test_types, orient=None
) # auto detect orient
test_datas.extend([df.to_csv(index=False).encode()] * 3)
test_types.extend(['text/csv'] * 3)
df_merged, slices = read_dataframes_from_json_n_csv(test_datas, test_types)
for s in slices:
assert_df_equal(df_merged[s], df)
def test_batch_read_dataframes_from_csv_other_CRLF(df):
csv_str = df.to_csv(index=False)
if '\r\n' in csv_str:
csv_str = '\n'.join(_csv_split(csv_str, '\r\n')).encode()
else:
csv_str = '\r\n'.join(_csv_split(csv_str, '\n')).encode()
df_merged, _ = read_dataframes_from_json_n_csv([csv_str], ['text/csv'])
assert_df_equal(df_merged, df)
def test_batch_read_dataframes_from_json_of_orients(df, orient):
test_datas = [df.to_json(orient=orient).encode()] * 3
test_types = ['application/json'] * 3
    df_merged, slices = read_dataframes_from_json_n_csv(test_datas, test_types, orient)
for s in slices:
assert_df_equal(df_merged[s], df)
def test_batch_read_dataframes_from_json_with_wrong_orients(df, orient):
test_datas = [df.to_json(orient='table').encode()] * 3
test_types = ['application/json'] * 3
with pytest.raises(BadInput):
read_dataframes_from_json_n_csv(test_datas, test_types, orient)
def test_batch_read_dataframes_from_json_in_mixed_order():
# different column order when orient=records
df_json = b'[{"A": 1, "B": 2, "C": 3}, {"C": 6, "A": 2, "B": 4}]'
df_merged, slices = read_dataframes_from_json_n_csv([df_json], ['application/json'])
for s in slices:
assert_df_equal(df_merged[s], pd.read_json(df_json))
# different row/column order when orient=columns
df_json1 = b'{"A": {"1": 1, "2": 2}, "B": {"1": 2, "2": 4}, "C": {"1": 3, "2": 6}}'
df_json2 = b'{"B": {"1": 2, "2": 4}, "A": {"1": 1, "2": 2}, "C": {"1": 3, "2": 6}}'
df_json3 = b'{"A": {"1": 1, "2": 2}, "B": {"2": 4, "1": 2}, "C": {"1": 3, "2": 6}}'
df_merged, slices = read_dataframes_from_json_n_csv(
[df_json1, df_json2, df_json3], ['application/json'] * 3
)
for s in slices:
assert_df_equal(
df_merged[s][["A", "B", "C"]], pd.read_json(df_json1)[["A", "B", "C"]]
)
def test_guess_orient(df, orient):
json_str = df.to_json(orient=orient)
guessed_orient = _guess_orient(json.loads(json_str))
assert orient == guessed_orient or orient in guessed_orient
@pytest.mark.skipif('not psutil.POSIX')
def test_benchmark_load_dataframes():
'''
read_dataframes_from_json_n_csv should be 30x faster than pd.read_json + pd.concat
'''
test_count = 50
dfs = [pd.DataFrame(np.random.rand(10, 100)) for _ in range(test_count)]
inputs = [df.to_json().encode() for df in dfs]
time_st = time.time()
    dfs = [pd.read_json(i) for i in inputs]
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import sys
def get_scatter(df=None):
if df is None:
# x = [0, 50, 100, 200, 300, 400, 500, 600]
# y = [0, 1.5, 2, 4, 7.5, 12.5, 20, 40.6]
# y1 = [0, 1.7, 3, 5, 8.5, 15.5, 24, 42.6]
d1 = np.random.normal(5, 0.5, 200)
d2 = np.random.normal(30, 1.2, 60)
d3 = np.random.normal(6, 1, 60)
d11 = np.random.normal(5, 0.5, 200)
d22 = np.random.normal(30, 1.2, 60)
d33 = np.random.normal(6, 1, 60)
y = np.concatenate((d1, d2, d3)).flatten()
y1 = np.concatenate((d11, d22, d33)).flatten()
x = np.arange(len(y))
else:
x = np.arange(len(df))
y = df['S1']
y1 = df['S2']
fig = go.Figure()
fig.add_trace(go.Scatter(x=x,
y=y,
name='Sensor 1',
marker_color='#E48F72'))
fig.add_trace(go.Scatter(x=x,
y=y1,
name='Sensor 2',
marker_color='lightgreen'))
fig.update_layout(title=dict(
x=0.5,
y=0.8,
font=dict(size=20, color='black')),
legend=dict(
x=0,
y=1,
bgcolor = '#373a40',
traceorder='normal',
font=dict(
size=12,
color= 'white'),
),
template='plotly_dark',
height=330,
width=800,
font=dict(family="Courier",
size=12, color='black'),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='gray',
margin=dict(t=50, b=70, l=80, r=1))
fig.update_xaxes(title='Time Interval [100ms]')
fig.update_yaxes(title='PRESSURE in PSI')
return fig
def get_bar(color='steelblue', title='', df=None):
SIZE = 10
if df is None:
d1 = np.random.normal(5, 0.5, 200)
d2 = np.random.normal(30, 1.2, 60)
d3 = np.random.normal(6, 1, 60)
data = np.concatenate((d1, d2, d3))
df = pd.DataFrame()
df['data'] = data
else:
data = df.values
modulo = len(data) % SIZE
    df = pd.DataFrame()
import logging
import webbrowser
from datetime import datetime
import pandas as pd
import wx
import wx.grid
import wx.dataview as dv
from Common.GUIText import Datasets as GUIText
from Common.GUIText import Filtering as FilteringGUIText
import Common.Constants as Constants
import Common.Objects.Datasets as Datasets
import Common.Objects.GUIs.Codes as CodesGUIs
import Common.Objects.Utilities.Datasets as DatasetsUtilities
# This model acts as a bridge between the DatasetsViewCtrl and the dataset,
# organizing it hierarchically as a collection of Datasets.
# This model provides these data columns:
# 0. Name: string
# 1. Source: string
# 2. Type: string
# 3. Grouping Field: string
# 4. Number of documents: int
# 5. Created datetime
class DatasetsViewModel(dv.PyDataViewModel):
def __init__(self, data):
dv.PyDataViewModel.__init__(self)
self.data = data
self.UseWeakRefs(True)
def GetColumnCount(self):
'''Report how many columns this model provides data for.'''
return 6
def GetColumnType(self, col):
return "string"
def GetChildren(self, parent, children):
# If the parent item is invalid then it represents the hidden root
# item, so we'll use the genre objects as its children and they will
# end up being the collection of visible roots in our tree.
if not parent:
for dataset in self.data:
children.append(self.ObjectToItem(dataset))
return len(self.data)
# Otherwise we'll fetch the python object associated with the parent
# item and make DV items for each of it's child objects.
node = self.ItemToObject(parent)
if isinstance(node, Datasets.Dataset):
for key in node.computational_fields:
children.append(self.ObjectToItem(node.computational_fields[key]))
return len(children)
return 0
def GetParent(self, item):
if not item:
return dv.NullDataViewItem
node = self.ItemToObject(item)
if node.parent == None:
return dv.NullDataViewItem
else:
return self.ObjectToItem(node.parent)
def GetValue(self, item, col):
''''Fetch the data object for this item's column.'''
node = self.ItemToObject(item)
if isinstance(node, Datasets.Dataset):
dataset_type = DatasetsUtilities.DatasetTypeLabel(node)
mapper = { 0 : node.name,
1 : node.dataset_source,
2 : dataset_type,
3 : len(node.data),
4 : node.created_dt.strftime("%Y-%m-%d, %H:%M:%S")
}
return mapper[col]
elif isinstance(node, Datasets.Field):
mapper = { 0 : node.name,
1 : "",
2 : "",
3 : 0,
4 : node.created_dt.strftime("%Y-%m-%d, %H:%M:%S")
}
return mapper[col]
else:
raise RuntimeError("unknown node type")
def GetAttr(self, item, col, attr):
'''retrieves custom attributes for item'''
node = self.ItemToObject(item)
if col == 0:
if isinstance(node, Datasets.Dataset):
attr.SetColour('blue')
attr.SetBold(True)
return True
return False
def HasContainerColumns(self, item):
if not item:
return False
return True
def IsContainer(self, item):
''' Return False for Field, True otherwise for this model.'''
# The hidden root is a container
if not item:
return True
node = self.ItemToObject(item)
if isinstance(node, Datasets.Field):
return False
        # but everything else is a container
return True
def SetValue(self, value, item, col):
'''only allowing updating of Dataset names as rest is connected to data being retrieved'''
node = self.ItemToObject(item)
if col == 0:
if isinstance(node, Datasets.Dataset):
main_frame = wx.GetApp().GetTopWindow()
if value != node.name:
node.name = value
main_frame.DatasetsUpdated()
return True
#This view enables displaying datasets and how they are grouped
class DatasetsViewCtrl(dv.DataViewCtrl):
def __init__(self, parent, model):
dv.DataViewCtrl.__init__(self, parent, style=dv.DV_MULTIPLE|dv.DV_ROW_LINES)
self.AssociateModel(model)
model.DecRef()
editabletext_renderer = dv.DataViewTextRenderer(mode=dv.DATAVIEW_CELL_EDITABLE)
column0 = dv.DataViewColumn(GUIText.NAME, editabletext_renderer, 0, align=wx.ALIGN_LEFT)
self.AppendColumn(column0)
text_renderer = dv.DataViewTextRenderer()
column1 = dv.DataViewColumn(GUIText.SOURCE, text_renderer, 1, align=wx.ALIGN_LEFT)
self.AppendColumn(column1)
text_renderer = dv.DataViewTextRenderer()
column2 = dv.DataViewColumn(GUIText.TYPE, text_renderer, 2, align=wx.ALIGN_LEFT)
self.AppendColumn(column2)
int_renderer = dv.DataViewTextRenderer(varianttype="long")
column4 = dv.DataViewColumn(GUIText.DOCUMENT_NUM, int_renderer, 3, align=wx.ALIGN_LEFT)
self.AppendColumn(column4)
text_renderer = dv.DataViewTextRenderer()
column5 = dv.DataViewColumn(GUIText.RETRIEVED_ON, text_renderer, 4, align=wx.ALIGN_LEFT)
self.AppendColumn(column5)
for column in self.Columns:
column.Sortable = True
column.Reorderable = True
column.Resizeable = True
self.Expander(None)
self.Bind(dv.EVT_DATAVIEW_ITEM_CONTEXT_MENU, self.OnShowPopup)
self.Bind(wx.EVT_MENU, self.OnCopyItems, id=wx.ID_COPY)
accel_tbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord('C'), wx.ID_COPY)])
self.SetAcceleratorTable(accel_tbl)
def Expander(self, item, autosize_flg=True):
model = self.GetModel()
if item != None:
self.Expand(item)
children = []
model.GetChildren(item, children)
for child in children:
self.Expander(child, False)
if autosize_flg:
self.AutoSize()
def AutoSize(self):
for column in self.GetColumns():
column.SetWidth(wx.COL_WIDTH_AUTOSIZE)
def OnShowPopup(self, event):
menu = wx.Menu()
menu.Append(1, GUIText.COPY)
menu.Bind(wx.EVT_MENU, self.OnCopyItems)
self.PopupMenu(menu)
def OnCopyItems(self, event):
selected_items = []
model = self.GetModel()
for item in self.GetSelections():
dataset = model.GetValue(item, 0)
source = model.GetValue(item, 1)
datasettype = model.GetValue(item, 2)
selected_items.append('\t'.join([dataset, source, datasettype]).strip())
clipdata = wx.TextDataObject()
clipdata.SetText("\n".join(selected_items))
wx.TheClipboard.Open()
wx.TheClipboard.SetData(clipdata)
wx.TheClipboard.Close()
# A single grid table is needed for each entry in main_frame.datasets, regardless of the dataset type
class DatasetsDataGridTable(wx.grid.GridTableBase):
def __init__(self, dataset):
wx.grid.GridTableBase.__init__(self)
self.dataset = dataset
self.data_df = pd.DataFrame(self.dataset.data.values())
self.label_column_names = []
self.label_col_types = []
self.data_column_names = []
self.GetColNames()
if not hasattr(self.dataset, 'label_fields'):
            self.data_df['created_utc'] = pd.to_datetime(self.data_df['created_utc'], unit='s', utc=True)
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page is past the end (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the requested page is past the end (not found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the requested page is past the end (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the requested page is past the end (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page is past the end (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the requested page is past the end (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when paging
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome/Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page number is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome/Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # temporary buffer for the current page, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page number is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
        dfAll = pd.concat([dfAll, df])
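    # the tail of Singular() appears truncated in this dump; by analogy with the
    # sibling scrapers above, it presumably finishes the same way:
    dfAll = dfAll.reset_index(drop=True)
    save(shop_id, name, dfAll)
    upload(shop_id, name)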
from datetime import datetime, timedelta
import re
import sys
import numpy as np
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.core.common as com
from pandas.compat import StringIO, callable
import pandas.compat as compat
try:
import dateutil
# raise exception if dateutil 2.0 install on 2.x platform
if (sys.version_info[0] == 2 and
dateutil.__version__ == '2.0'): # pragma: no cover
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
except ImportError: # pragma: no cover
print('Please install python-dateutil via easy_install or some method!')
raise # otherwise a 2nd import won't show the message
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _infer_tzinfo(start, end):
def _infer(a, b):
tz = a.tzinfo
if b and b.tzinfo:
if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):
raise AssertionError('Inputs must both have the same timezone,'
' {0} != {1}'.format(tz, b.tzinfo))
return tz
tz = None
if start is not None:
tz = _infer(start, end)
elif end is not None:
tz = _infer(end, start)
return tz
def _guess_datetime_format(dt_str, dayfirst=False,
dt_str_parse=compat.parse_date,
dt_str_split=_DATEUTIL_LEXER_SPLIT):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
dayfirst : boolean, default False
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
    dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
This function should take in a datetime string and return
a `datetime.datetime` guess that the datetime string represents
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
    ret : datetime format string (for `strftime` or `strptime`)
"""
if dt_str_parse is None or dt_str_split is None:
return None
if not isinstance(dt_str, compat.string_types):
return None
day_attribute_and_format = (('day',), '%d')
datetime_attrs_to_format = [
(('year', 'month', 'day'), '%Y%m%d'),
(('year',), '%Y'),
(('month',), '%B'),
(('month',), '%b'),
(('month',), '%m'),
day_attribute_and_format,
(('hour',), '%H'),
(('minute',), '%M'),
(('second',), '%S'),
(('microsecond',), '%f'),
(('second', 'microsecond'), '%S.%f'),
]
if dayfirst:
datetime_attrs_to_format.remove(day_attribute_and_format)
datetime_attrs_to_format.insert(0, day_attribute_and_format)
try:
parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
except:
# In case the datetime can't be parsed, its format cannot be guessed
return None
if parsed_datetime is None:
return None
try:
tokens = dt_str_split(dt_str)
except:
# In case the datetime string can't be split, its format cannot
# be guessed
return None
format_guess = [None] * len(tokens)
found_attrs = set()
for attrs, attr_format in datetime_attrs_to_format:
# If a given attribute has been placed in the format string, skip
# over other formats for that same underlying attribute (IE, month
# can be represented in multiple different ways)
if set(attrs) & found_attrs:
continue
if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
for i, token_format in enumerate(format_guess):
if (token_format is None and
tokens[i] == parsed_datetime.strftime(attr_format)):
format_guess[i] = attr_format
found_attrs.update(attrs)
break
# Only consider it a valid guess if we have a year, month and day
if len(set(['year', 'month', 'day']) & found_attrs) != 3:
return None
output_format = []
for i, guess in enumerate(format_guess):
if guess is not None:
# Either fill in the format placeholder (like %Y)
output_format.append(guess)
else:
# Or just the token separate (IE, the dashes in "01-01-2013")
try:
# If the token is numeric, then we likely didn't parse it
# properly, so our guess is wrong
float(tokens[i])
return None
except ValueError:
pass
output_format.append(tokens[i])
guessed_format = ''.join(output_format)
if parsed_datetime.strftime(guessed_format) == dt_str:
return guessed_format
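# Hedged usage sketch for _guess_datetime_format (illustrative only; the exact
# guess can vary with the installed dateutil version):
if False:
    print(_guess_datetime_format('2011-12-30 00:00:00.000000'))  # e.g. '%Y-%m-%d %H:%M:%S.%f'
    print(_guess_datetime_format('30/12/2011', dayfirst=True))   # e.g. '%d/%m/%Y'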
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = com.notnull(arr).nonzero()[0]
if len(non_nan_elements):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
def to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True, coerce=False,
unit='ns', infer_datetime_format=False):
"""
Convert argument to datetime.
Parameters
----------
arg : string, datetime, array of strings (with possible NAs)
errors : {'ignore', 'raise'}, default 'ignore'
Errors are ignored by default (values left untouched).
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
        If both dayfirst and yearfirst are True, yearfirst takes precedence (same as dateutil).
        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
If True returns a DatetimeIndex, if False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
If True, require an exact format match.
If False, allow the format to match anywhere in the target string.
coerce : force errors to NaT (False by default)
Timestamps outside the interval between Timestamp.min and Timestamp.max
        (approximately 1677-09-22 to 2262-04-11) will also be forced to NaT.
unit : unit of the arg (D,s,ms,us,ns) denote the unit in epoch
(e.g. a unix timestamp), which is an integer/float number.
infer_datetime_format : boolean, default False
If no `format` is given, try to infer the format based on the first
datetime string. Provides a large speed-up in many cases.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
    return will have datetime.datetime type (or corresponding array/Series).
Examples
--------
Take separate series and convert to datetime
>>> import pandas as pd
>>> i = pd.date_range('20000101',periods=100)
>>> df = pd.DataFrame(dict(year = i.year, month = i.month, day = i.day))
>>> pd.to_datetime(df.year*10000 + df.month*100 + df.day, format='%Y%m%d')
0 2000-01-01
1 2000-01-02
...
98 2000-04-08
99 2000-04-09
Length: 100, dtype: datetime64[ns]
Or from strings
>>> df = df.astype(str)
>>> pd.to_datetime(df.day + df.month + df.year, format="%d%m%Y")
0 2000-01-01
1 2000-01-02
...
98 2000-04-08
99 2000-04-09
Length: 100, dtype: datetime64[ns]
Date that does not meet timestamp limitations:
>>> pd.to_datetime('13000101', format='%Y%m%d')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', coerce=True)
NaT
"""
return _to_datetime(arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst,
utc=utc, box=box, format=format, exact=exact, coerce=coerce,
unit=unit, infer_datetime_format=infer_datetime_format)
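# Hedged example of the infer_datetime_format fast path documented above: the
# format is guessed once from the first non-null element and reused for the
# whole array instead of re-parsing every string with dateutil.
if False:
    from pandas import Series
    s = Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
    to_datetime(s, infer_datetime_format=True)   # typically noticeably faster
    to_datetime(s, infer_datetime_format=False)  # per-element dateutil parsing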
def _to_datetime(arg, errors='ignore', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True, coerce=False,
unit='ns', freq=None, infer_datetime_format=False):
"""
Same as to_datetime, but accept freq for
DatetimeIndex internal construction
"""
from pandas.core.series import Series
from pandas.tseries.index import DatetimeIndex
def _convert_listlike(arg, box, format):
if isinstance(arg, (list,tuple)):
arg = np.array(arg, dtype='O')
# these are shortcutable
if com.is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
return DatetimeIndex(arg, tz='utc' if utc else None)
except ValueError:
pass
return arg
elif format is None and com.is_integer_dtype(arg) and unit=='ns':
result = arg.astype('datetime64[ns]')
if box:
return DatetimeIndex(result, tz='utc' if utc else None)
return result
arg = com._ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
            # format because using it would make this path slower in this
            # special case
format_is_iso8601 = (
('%Y-%m-%dT%H:%M:%S.%f'.startswith(format) or
'%Y-%m-%d %H:%M:%S.%f'.startswith(format)) and
format != '%Y'
)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
try:
result = None
if format is not None:
# shortcut formatting here
if format == '%Y%m%d':
try:
result = _attempt_YYYYMMDD(arg, coerce=coerce)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
# fallback
if result is None:
try:
result = tslib.array_strptime(
arg, format, exact=exact, coerce=coerce
)
except (tslib.OutOfBoundsDatetime):
if errors == 'raise':
raise
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == 'raise':
raise
result = arg
if result is None and (format is None or infer_datetime_format):
result = tslib.array_to_datetime(arg, raise_=errors=='raise',
utc=utc, dayfirst=dayfirst,
yearfirst=yearfirst, freq=freq,
coerce=coerce, unit=unit,
require_iso8601=require_iso8601)
if com.is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz='utc' if utc else None)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, None, tz=tz)
except (ValueError, TypeError):
raise e
if arg is None:
return arg
elif isinstance(arg, tslib.Timestamp):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg.values, False, format)
return Series(values, index=arg.index, name=arg.name)
elif com.is_list_like(arg):
return _convert_listlike(arg, box, format)
return _convert_listlike(np.array([ arg ]), box, format)[0]
def _attempt_YYYYMMDD(arg, coerce):
""" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is passed in as an object dtype, but could really be ints/strings with nan-like values or floats (e.g. with nan) """
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
        return tslib.array_to_datetime(lib.try_parse_year_month_day(carg/10000, carg/100 % 100, carg % 100),
                                       coerce=coerce)
"""
Created on Thu Jan 26 17:04:11 2017
Preprocess Luna datasets and create nodule masks (and/or blank subsets)
NOTE that:
1. we do NOT segment the lungs at all -- we will use the raw images for training (DO_NOT_SEGMENT = True)
2. No corrections are made to the nodule radius in relation to the thickness of the layers (radius = (ca[4])/2, simply)
@author: <NAME>, <EMAIL>
Some functions have been reused from the respective examples/kernels openly published at https://www.kaggle.com/arnavkj95/data-science-bowl-2017/ , as referenced within the file
"""
#%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.ndimage as ndimage
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
DO_NOT_SEGMENT = True #### major difference with the initial/Feb version
RESIZE_SPACING = [2,2,2]
### z, y, x (x & y MUST be the same)
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
MARKER_INTERNAL_THRESH = -400 # was -400; maybe use -320 ??
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True # NOT a good idea; no added value
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
        fw = MARKER_FRAME_WIDTH # frame width (it looks like 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
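# Quick visual sanity check of generate_markers on a synthetic slice (kept
# disabled, mirroring the useTestPlot switches used elsewhere in this file):
useMarkerDemo = False
if useMarkerDemo:
    demo_slice = np.zeros((512, 512), dtype=np.int16)   # 0 HU "body"
    demo_slice[180:320, 180:320] = -1000                # air pocket below MARKER_INTERNAL_THRESH
    d_int, d_ext, d_ws = generate_markers(demo_slice)
    plt.imshow(d_ws, cmap='gray')
    plt.show()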
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 ## added for tests; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
    ### NOTE if no iterations, i.e. the default of 1, we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
#image = image_slices[70]
def seperate_lungs_cv2(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
    #wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watershed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dynamically adjust the number of iterations; original was 8
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
    outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool) # fast
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
    structure2 = morphology.disk(2) # used to fill the gaps/holes close to the border (otherwise the large structure would create a gap by the edge)
    structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynamically adjust; better, 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
    ### NOTE if no iterations, i.e. the default of 1, we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
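# Hedged per-slice usage of the cv2-accelerated variant above; `image_slices`
# is assumed to be a stack of 2-D HU slices (the name echoes the commented-out
# example near seperate_lungs_cv2 and is not defined in this file):
if False:
    seg, filt, outl, ws, grad, m_i, m_e, m_w = seperate_lungs_cv2(image_slices[70])
    plt.imshow(seg, cmap='gray')
    plt.show()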
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
    ### NOTE if no iterations, i.e. the default of 1, we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
    #image = scipy.ndimage.interpolation.zoom(image, real_resize_factor) # neither mode="wrap"/etc. nor cval=-1024 can ensure that the min and max values are unchanged .... # cval added
    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
    #image = scipy.ndimage.zoom(image, real_resize_factor, order=1) # order=1 bilinear, preserves the min and max of the image -- probably better for us (also faster than spline/order=2)
    #image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest', order=1) # order=1 bilinear, preserves the min and max of the image -- probably better for us (also faster than spline/order=2)
return image, new_spacing
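# Sketch of how resample() would be driven for a DICOM series (it appears unused
# in the LUNA path of this file, which uses load_itk instead; `hu_volume` and
# `dicom_slices` are assumed/hypothetical names, not defined here):
if False:
    resampled, spacing_after = resample(hu_volume, dicom_slices, new_spacing=[1, 1, 1])
    print(hu_volume.shape, '->', resampled.shape, 'new spacing', spacing_after)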
def segment_one(image_slices):
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(np.bool)
l_outline = np.zeros(shape).astype(np.bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(np.bool)
l_marker_external = np.zeros(shape).astype(np.bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and may lead to incorrect results when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0
## have to reduce dx to 0 as for instance at least one image of the lungs stretch right to the border even without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
    crop_max_ratio_z = 0.6 # 0.8 is too big; see make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
    mask_volume = zlen*ylen*xlen / (mask_shape[0] * mask_shape[1] * mask_shape[2]) # fraction of the scan volume covered by the mask bounding box
    mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something very small)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
#maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
        # mask OK, crop the image and mask
### full crop
#image = image[bb[0]:bb[3], bb[1]:bb[4], bb[2]:bb[5]]
#mask = mask[bb[0]:bb[3], bb[1]:bb[4], bb[2]:bb[5]]
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
            # Mask too big, auto-correct
            print("The following mask is likely too big, auto-reducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
useSummaryPlot = True
if useSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
return l_segmented, image
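# segment_one() is only exercised when DO_NOT_SEGMENT is False; a hedged sketch
# of its use on a resampled HU volume (`lung_volume` is a placeholder name):
if False:
    seg_full, seg_cropped = segment_one(lung_volume)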
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
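# Hedged usage sketch of load_itk (relies on the `files` glob defined further
# below, so it stays disabled here):
if False:
    demo_ct, demo_origin, demo_spacing = load_itk(files[0])
    print(demo_ct.shape, demo_origin, demo_spacing)   # (z, y, x) array, world origin, voxel spacing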
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
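# Worked round-trip for the two converters above (pure numpy; note that
# world_2_voxel takes an absolute value, so the round trip only holds when the
# world coordinate lies on the positive side of the origin, as it does here):
if False:
    demo_o = np.array([-200.0, -150.0, -150.0])   # origin (z, y, x)
    demo_s = np.array([2.5, 0.7, 0.7])            # spacing (z, y, x)
    demo_w = np.array([-100.0, 50.0, 20.0])       # a world coordinate
    demo_v = world_2_voxel(demo_w, demo_o, demo_s)        # ~[40., 285.7, 242.9]
    print(demo_v, voxel_2_world(demo_v, demo_o, demo_s))  # maps back to demo_w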
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
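# seq() builds the sampling offsets used by draw_circles below, e.g.
# seq(-4, 4, 2) -> [-4, -2, 0, 2, 4]. Note that when round((stop-start)/step) <= 1
# it returns [], so a nodule whose diameter is small relative to the voxel
# spacing can end up with no mask voxels at all.
if False:
    print(seq(-4, 4, 2))   # [-4, -2, 0, 2, 4]
    print(seq(-1, 1, 2))   # []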
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
        radius = (ca[4])/2 # VERSION iseg_luna3 - DO NOT CORRECT the radius in ANY way ...!!
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
                    #if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (constrained to a uniform RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
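# Self-contained sketch of draw_circles on a tiny synthetic volume: one 6 mm
# "nodule" centred at world (z, y, x) = (20, 20, 20) in a 20^3 voxel cube with
# 2 mm isotropic spacing and origin at (0, 0, 0); the column order mirrors
# LUNA's annotations.csv (seriesuid, coordX, coordY, coordZ, diameter_mm).
if False:
    demo_img = np.zeros((20, 20, 20), dtype=np.int16)
    demo_origin = np.array([0.0, 0.0, 0.0])
    demo_spacing = np.array([2.0, 2.0, 2.0])
    demo_cands = pd.DataFrame([['demo-uid', 20.0, 20.0, 20.0, 6.0]],
                              columns=['seriesuid', 'coordX', 'coordY', 'coordZ', 'diameter_mm'])
    demo_mask = draw_circles(demo_img, demo_cands, demo_origin, demo_spacing)
    print(demo_mask.sum(), 'mask voxel(s) set near voxel (10, 10, 10)')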
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to the RESIZE_SPACING voxel size (2x2x2 mm here). It saves them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
luna_subset = 0 # initial
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
file = files[12] # rough empty set test - if file is empty this would fail; 12th - 3 nodules
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
print ("Luna annotations (head)")
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
print (cands)
def create_nodule_mask(imagePath, cands):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
resize_factor = spacing / RESIZE_SPACING # was [1, 1, 1]
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
start = time.time()
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize, mode='nearest') # Andre mode added
if DO_NOT_SEGMENT:
lung_seg = lung_img
lung_seg_crop = lung_img
print("Rescale time, and path: ", ((time.time() - start)), imagePath )
else:
lung_seg, lung_seg_crop = segment_one(lung_img)
print("Rescale & Seg time, and path: ", ((time.time() - start)), imagePath )
useTestPlot = False
if useTestPlot:
plt.hist(img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_seg.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = img.shape[0] // 2
# Show some slice in the middle
plt.imshow(img[img_sel_i], cmap=plt.cm.gray)
plt.show()
img_sel_i = lung_seg.shape[0] // 2
# Show some slice in the middle
plt.imshow(lung_seg[img_sel_i], cmap='gray')
plt.show()
# Show some slice in the middle
plt.imshow(lung_seg_crop[img_sel_i], cmap='gray')
plt.show()
#create nodule mask
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
if useTestPlot:
lung_img.shape
lung_seg.shape
lung_seg_crop.shape
nodule_mask.shape
for i in range(nodule_mask.shape[0]):
print ("Slice: ", i)
plt.imshow(nodule_mask[i], cmap='gray')
plt.show()
img_sel_i = 146 # 36
plt.imshow(lung_seg[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
    w448 = int(448 // RESIZE_SPACING[1]) # we use 448 because, at this pixel resolution, it is too small for only 3 out of 1595 patients
#lung_img_448, lung_seg_448, nodule_mask_448 = np.zeros((lung_img.shape[0], w448, w448)), np.zeros((lung_seg.shape[0], w448, w448)), np.zeros((nodule_mask.shape[0], w448, w448))
lung_img_448 = np.full ((lung_img.shape[0], w448, w448), -2000, dtype=np.int16)
lung_seg_448 = np.full ((lung_seg.shape[0], w448, w448), -2000, dtype=np.int16)
nodule_mask_448 = np.zeros((nodule_mask.shape[0], w448, w448), dtype=np.int16)
original_shape = lung_img.shape
if (original_shape[1] > w448):
## need to crop the image to w448 size ...
print("Warning: additional crop from ... to width of: ", original_shape, w448)
offset = (w448 - original_shape[1])
        y_min = abs(offset // 2 ) ## we use the same diff order as for offset below to ensure correct calculation of new_origin (if we ever need it)
y_max = y_min + w448
lung_img = lung_img[:,y_min:y_max,:]
lung_seg = lung_seg[:,y_min:y_max,:]
nodule_mask = nodule_mask[:,y_min:y_max,:]
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
origin = new_origin
original_shape = lung_img.shape
if (original_shape[2] > w448):
x_min = (original_shape[2] - w448) // 2
x_max = x_min + w448
lung_img = lung_img[:,:,x_min:x_max]
lung_seg = lung_seg[:,:,x_min:x_max]
nodule_mask = nodule_mask[:,:,x_min:x_max]
original_shape = lung_img.shape
offset = (w448 - original_shape[1])
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
if offset > 0: #
for z in range(lung_img.shape[0]):
### if new_origin is used check the impact of the above crop for instance for:
### path = "'../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.430109407146633213496148200410'
lung_img_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_img[z,:,:]
lung_seg_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_seg[z,:,:]
nodule_mask_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
else:
        lung_img_448 = lung_img # equal dimensions, just copy everything (no need to add the originals within a frame)
lung_seg_448 = lung_seg
nodule_mask_448 = nodule_mask
nodule_mask_448_sum = np.sum(nodule_mask_448, axis=0)
if useTestPlot:
lung_img_448.shape
lung_seg_448.shape
#lung_seg_crop.shape
nodule_mask_448.shape
img_sel_i = 146 # 36
plt.imshow(lung_img_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(lung_seg_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask_448[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg_448[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
useSummaryPlot = True
if useSummaryPlot:
mask_sum_mean_x100 = 100 * np.mean(nodule_mask_448_sum)
axis = 1
lung_projections = []
mask_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
lung_projections.append(np.mean(lung_seg_448, axis=axis))
mask_projections.append(np.max(nodule_mask_448, axis=axis))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(lung_projections[0],cmap=plt.cm.gray)
ax[1].imshow(lung_projections[1],cmap=plt.cm.gray)
ax[2].imshow(lung_projections[2],cmap=plt.cm.gray)
plt.show()
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(mask_projections[0],cmap=plt.cm.gray)
ax[1].imshow(mask_projections[1],cmap=plt.cm.gray)
ax[2].imshow(mask_projections[2],cmap=plt.cm.gray)
plt.show()
print ("Mask_sum_mean_x100: ", mask_sum_mean_x100)
# save images.
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if DO_NOT_SEGMENT:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1) # data removed from the second part on AWS
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
if DO_NOT_SEGMENT:
np.savez_compressed(path_segmented + '_lung', lung_seg_448)
else:
np.savez_compressed(path_segmented + '_lung_seg', lung_seg_448)
np.savez_compressed(path_segmented + '_nodule_mask', nodule_mask_448)
return
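# Typical driver loop over the subset selected above (kept disabled here: the
# full run is long and writes .npz files under the lungs_2x2x2 / segmented_2x2x2
# folders next to the source .mhd files):
if False:
    for f in files:
        uid = f[f.rindex('/') + 1:]
        uid = uid[:len(uid) - len(".mhd")]
        create_nodule_mask(f, annotations[annotations.seriesuid == uid])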
def find_lungs_range(y, noise):
n = len(y)
mid = n // 2
new_start = 0
for i in range(mid, 0, -1):
if y[i] < noise:
new_start = i
break
new_end = n
for i in range(mid, n, 1):
if y[i] < noise:
new_end = i
break
return new_start, new_end
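# Tiny self-contained check of find_lungs_range: a profile that rises in the
# middle and drops below the noise floor near both ends.
if False:
    demo_profile = [0, 1, 2, 30, 40, 35, 2, 1, 0]
    print(find_lungs_range(demo_profile, noise=5))   # -> (2, 6)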
def update_nodule_mask_or_blank (imagePath, cands, true_mask=True):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
# load the old one and copy across
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if DO_NOT_SEGMENT:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
if true_mask:
# nothing to update reload and copy over
mask_img_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
nodule_mask_448 = mask_img_z['arr_0']
print("Loading and saving _nodule_mask as _nodule_mask_wblanks for: ", path_segmented)
else:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
resize_factor = spacing / RESIZE_SPACING
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
# loading of the image/images to sync with the update -- DOES NOT WORK
attempt_through_reloading = False ## this has failed
if attempt_through_reloading:
if DO_NOT_SEGMENT:
lung_img_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
lung_img_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
lung_img = lung_img_z['arr_0']
else:
## have to redo the calculations
start = time.time()
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize, mode='nearest') # Andre mode added
if DO_NOT_SEGMENT:
lung_seg = lung_img
lung_seg_crop = lung_img
print("Rescale time, and path: ", ((time.time() - start)), imagePath )
else:
lung_seg, lung_seg_crop = segment_one(lung_img)
print("Rescale & Seg time, and path: ", ((time.time() - start)), imagePath )
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
if not true_mask:
nodule_mask = -1 * nodule_mask # mark it as invalid to be zeroed later on (needed to get the blanks)
useTestPlot = False
if useTestPlot:
lung_img.shape
lung_seg.shape
lung_seg_crop.shape
nodule_mask.shape
            #mask0 = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
for i in range(nodule_mask.shape[0]):
print ("Slice: ", i)
plt.imshow(nodule_mask[i], cmap='gray')
plt.show()
        w448 = int(448 // RESIZE_SPACING[1]) # we use 448 because, at this pixel resolution, it is too small for only 3 out of 1595 patients
nodule_mask_448 = np.zeros((nodule_mask.shape[0], w448, w448), dtype=np.int16)
original_shape = lung_img.shape
if (original_shape[1] > w448):
## need to crop the image to w448 size ...
print("Warning: additional crop from ... to width of: ", original_shape, w448)
offset = (w448 - original_shape[1])
            y_min = abs(offset // 2 ) ## we use the same diff order as for offset below to ensure correct calculation of new_origin (if we ever need it)
y_max = y_min + w448
nodule_mask = nodule_mask[:,y_min:y_max,:]
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
origin = new_origin
original_shape = lung_img.shape
if (original_shape[2] > w448):
x_min = (original_shape[2] - w448) // 2
x_max = x_min + w448
nodule_mask = nodule_mask[:,:,x_min:x_max]
original_shape = lung_img.shape
offset = (w448 - original_shape[1])
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
if offset > 0: #
for z in range(lung_img.shape[0]):
nodule_mask_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
else:
nodule_mask_448 = nodule_mask
nodule_mask_448_sum = np.sum(nodule_mask_448, axis=0)
if useTestPlot:
nodule_mask_448.shape
img_sel_i = 146 # 36
plt.imshow(nodule_mask_448[img_sel_i], cmap='gray')
plt.show()
useSummaryPlot = False
if useSummaryPlot:
mask_sum_mean_x100 = 100 * np.mean(nodule_mask_448_sum)
count_blanks = np.sum(nodule_mask_448 < 0)
axis = 1
lung_projections = []
mask_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
#lung_projections.append(np.mean(lung_seg_448, axis=axis))
mask_projections.append(np.max(nodule_mask_448, axis=axis))
#f, ax = plt.subplots(1, 3, figsize=(15,5))
#ax[0].imshow(lung_projections[0],cmap=plt.cm.gray)
#ax[1].imshow(lung_projections[1],cmap=plt.cm.gray)
#ax[2].imshow(lung_projections[2],cmap=plt.cm.gray)
#plt.show()
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(mask_projections[0],cmap=plt.cm.gray)
ax[1].imshow(mask_projections[1],cmap=plt.cm.gray)
ax[2].imshow(mask_projections[2],cmap=plt.cm.gray)
plt.show()
print ("Mask_sum_mean_x100, blanks built-in: ", mask_sum_mean_x100, count_blanks)
np.savez_compressed(path_segmented + '_nodule_mask_wblanks', nodule_mask_448)
return
def create_nodule_mask_or_blank (imagePath, cands, true_mask=True):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
resize_factor = spacing / RESIZE_SPACING # was [1, 1, 1]
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
start = time.time()
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize, mode='nearest') # Andre mode added
if DO_NOT_SEGMENT:
lung_seg = lung_img
lung_seg_crop = lung_img
print("Rescale time, and path: ", ((time.time() - start)), imagePath )
else:
lung_seg, lung_seg_crop = segment_one(lung_img)
print("Rescale & Seg time, and path: ", ((time.time() - start)), imagePath )
useTestPlot = False
if useTestPlot:
plt.hist(img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_img.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(lung_seg.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = img.shape[0] // 2
# Show some slice in the middle
plt.imshow(img[img_sel_i], cmap=plt.cm.gray)
plt.show()
img_sel_i = lung_img.shape[0] // 2
# Show some slice in the middle
plt.imshow(lung_img[img_sel_i], cmap='gray')
plt.show()
# Show some slice in the middle
plt.imshow(lung_img[:, 4* lung_img.shape[1] // 6], cmap='gray')
plt.show()
        HU_LUNGS_MIN = -900 # the algo is sensitive to this value -- keep it at -900 unless retested
HU_LUNGS_MAX = -400
jsteps = 10
for j in range(jsteps):
# Show some slice in the middle
img_sel_i = j * lung_img.shape[1] // jsteps
img_cut = lung_img[:, img_sel_i]
lix = (img_cut > HU_LUNGS_MIN) & (img_cut < HU_LUNGS_MAX)
lix_y = np.sum(lix, axis=1)
print ("Cut & ratio, lix_y (min, mean, max): ", j, j/jsteps, np.min(lix_y),np.mean(lix_y), np.max(lix_y) )
noise = 3 * np.min(lix_y)
noise = 0.05 * np.max(lix_y)
noise = max([3 * np.min(lix_y), 0.05 * np.max(lix_y)])
print ("Lungs range: ", find_lungs_range(lix_y, noise))
plt.imshow(img_cut, cmap='gray')
plt.show()
plt.imshow(lix, cmap='gray')
plt.show()
plt.plot (lix_y)
plt.show()
ymin = int(0.4 * lung_img.shape[1])
ymax = int(0.6 * lung_img.shape[1])
zmin_new = lung_img.shape[0] // 2
zmax_new = lung_img.shape[0] // 2
j = ymin
for j in range(ymin, ymax+1):
img_cut = lung_img[:, j]
img_cut_lungs = (img_cut > HU_LUNGS_MIN) & (img_cut < HU_LUNGS_MAX)
lungs_across = np.sum(img_cut_lungs, axis = 1)
#noise_bottom_some = np.mean(lungs_across[0:5])
            noise = np.max([3*np.min(lungs_across), 0.05 * np.max(lungs_across)]) # experimental -- could fail if the scan has only the central part of the lungs and no borders at all -- CHECK
zmin, zmax = find_lungs_range(lungs_across, noise)
if zmin < zmin_new:
zmin_new = zmin
if zmax > zmax_new:
print ("j, zmax: ", j, zmax)
zmax_new = zmax
plt.imshow(img_cut, cmap='gray')
plt.show()
plt.imshow(img_cut_lungs, cmap='gray')
plt.show()
plt.plot (lungs_across)
plt.show()
HU_LUNGS_MIN = -950
HU_LUNGS_MAX = -400
ling = img #lung_img # lung_img # for our testing here
step = 400
for HU_LUNGS_MIN in range(-1000, 1000, step):
HU_LUNGS_MAX = HU_LUNGS_MIN + step
print ("HU_LUNGS_MIN, HU_LUNGS_MAX: ", HU_LUNGS_MIN, HU_LUNGS_MAX)
lix = (ling > HU_LUNGS_MIN) & (ling < HU_LUNGS_MAX)
lix_z = np.max(lix, axis=0).astype(np.int16)
plt.imshow(lix_z, cmap='gray')
plt.show()
HU_LUNGS_MIN = -900
HU_LUNGS_MAX = -500
ling = img #lung_img # lung_img # for our testing here
print ("HU_LUNGS_MIN, HU_LUNGS_MAX: ", HU_LUNGS_MIN, HU_LUNGS_MAX)
lix = (ling > HU_LUNGS_MIN) & (ling < HU_LUNGS_MAX)
lix_z = np.max(lix, axis=0).astype(np.int16)
lix_z_x = np.sum(lix_z, axis=0)
lix_z_y = np.sum(lix_z, axis=1)
plt.imshow(lix_z, cmap='gray')
plt.show()
plt.plot (lix_z_x)
plt.show()
plt.plot (lix_z_y)
plt.show()
for i in range(0,lung_img.shape[0], 10):
print("section: ", i)
plt.imshow(lung_img[i], cmap='gray')
plt.show()
img_sel_i = lung_seg.shape[0] // 2
# Show some slice in the middle
plt.imshow(lung_seg[img_sel_i], cmap='gray')
plt.show()
# Show some slice in the middle
plt.imshow(lung_seg_crop[img_sel_i], cmap='gray')
plt.show()
#create nodule mask
#cands.diameter_mm = 3.2
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
if not true_mask:
nodule_mask = -1 * nodule_mask # mark it as invalid to be zeroed later on (needed to get the blanks)
#np.sum(nodule_mask)
if useTestPlot:
lung_img.shape
lung_seg.shape
lung_seg_crop.shape
nodule_mask.shape
for i in range(nodule_mask.shape[0]):
print ("Slice: ", i)
plt.imshow(nodule_mask[i], cmap='gray')
plt.show()
img_sel_i = 146 # 36
plt.imshow(lung_seg[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
w448 = int(448 // RESIZE_SPACING[1]) # we use 448; at this pixel resolution it is too small for only 3 out of 1595 patients
#lung_img_448, lung_seg_448, nodule_mask_448 = np.zeros((lung_img.shape[0], w448, w448)), np.zeros((lung_seg.shape[0], w448, w448)), np.zeros((nodule_mask.shape[0], w448, w448))
lung_img_448 = np.full ((lung_img.shape[0], w448, w448), -2000, dtype=np.int16)
lung_seg_448 = np.full ((lung_seg.shape[0], w448, w448), -2000, dtype=np.int16)
nodule_mask_448 = np.zeros((nodule_mask.shape[0], w448, w448), dtype=np.int16)
original_shape = lung_img.shape
if (original_shape[1] > w448):
## need to crop the image to w448 size ...
print("Warning: additional crop from ... to width of: ", original_shape, w448)
offset = (w448 - original_shape[1])
y_min = abs(offset // 2 ) ## we use the same diff order as for offset below to ensure correct calculation of new_origin (if we ever need it)
y_max = y_min + w448
lung_img = lung_img[:,y_min:y_max,:]
lung_seg = lung_seg[:,y_min:y_max,:]
nodule_mask = nodule_mask[:,y_min:y_max,:]
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
origin = new_origin
original_shape = lung_img.shape
if (original_shape[2] > w448):
x_min = (original_shape[2] - w448) // 2
x_max = x_min + w448
lung_img = lung_img[:,:,x_min:x_max]
lung_seg = lung_seg[:,:,x_min:x_max]
nodule_mask = nodule_mask[:,:,x_min:x_max]
original_shape = lung_img.shape
offset = (w448 - original_shape[1])
upper_offset = offset// 2
lower_offset = offset - upper_offset
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
if offset > 0: #
for z in range(lung_img.shape[0]):
### if new_origin is used check the impact of the above crop for instance for:
### path = "'../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.430109407146633213496148200410'
lung_img_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_img[z,:,:]
lung_seg_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_seg[z,:,:]
nodule_mask_448[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
else:
lung_img_448 = lung_img # equal dimensions, just copy everything (no need to place the originals within a frame)
lung_seg_448 = lung_seg
nodule_mask_448 = nodule_mask
nodule_mask_448_sum = np.sum(nodule_mask_448, axis=0)
#lung_seg_448_mean = np.mean(lung_seg_448, axis=0)
if useTestPlot:
lung_img_448.shape
lung_seg_448.shape
#lung_seg_crop.shape
nodule_mask_448.shape
img_sel_i = 146 # 36
plt.imshow(lung_img_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(lung_seg_448[img_sel_i], cmap=plt.cm.gray)
plt.show()
plt.imshow(nodule_mask_448[img_sel_i], cmap='gray')
plt.show()
for i in range (141, 153):
print ("Slice: ", i)
plt.imshow(lung_seg_448[i], cmap='gray')
plt.show()
#plt.imshow(nodule_mask[i], cmap='gray')
#plt.show()
useSummaryPlot = True
if useSummaryPlot:
mask_sum_mean_x100 = 100 * np.mean(nodule_mask_448_sum)
axis = 1
lung_projections = []
mask_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
lung_projections.append(np.mean(lung_seg_448, axis=axis))
mask_projections.append(np.max(nodule_mask_448, axis=axis))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(lung_projections[0],cmap=plt.cm.gray)
ax[1].imshow(lung_projections[1],cmap=plt.cm.gray)
ax[2].imshow(lung_projections[2],cmap=plt.cm.gray)
plt.show()
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(mask_projections[0],cmap=plt.cm.gray)
ax[1].imshow(mask_projections[1],cmap=plt.cm.gray)
ax[2].imshow(mask_projections[2],cmap=plt.cm.gray)
plt.show()
print ("Mask_sum_mean_x100: ", mask_sum_mean_x100)
# save images.
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if DO_NOT_SEGMENT:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
#np.save(imageName + '_lung_img.npz', lung_img_448)
if DO_NOT_SEGMENT:
np.savez_compressed(path_segmented + '_lung', lung_seg_448)
else:
np.savez_compressed(path_segmented + '_lung_seg', lung_seg_448)
np.savez_compressed(path_segmented + '_nodule_mask', nodule_mask_448)
return
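# --- Editor's illustration (hedged sketch, not part of the original pipeline): the
# --- centre-in-fixed-frame step used above for lung_img_448, factored out on its own.
# --- The width and fill value mirror the constants above; the volume is assumed to be
# --- square and already cropped, and numpy is assumed imported as np, as elsewhere here.
def _centre_in_frame(volume, width=448, fill=-2000):
    framed = np.full((volume.shape[0], width, width), fill, dtype=np.int16)
    offset = width - volume.shape[1]
    upper = offset // 2
    lower = offset - upper
    if offset > 0:
        # place each slice centred inside the fixed-width frame
        framed[:, upper:-lower, upper:-lower] = volume
    else:
        # already at (or beyond) the target width: keep the volume as is
        framed = volume
    return framed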
def create_nodule_mask_subset(luna_subset):
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
file = "../luna/original_lungs/subset0/1.3.6.1.4.1.14519.5.2.1.6279.6001.564534197011295112247542153557.mhd"
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#print (cands)
create_nodule_mask (imagePath, cands)
def create_nodule_mask_or_blank_subset(luna_subset, create_wblanks_mask_only=False):
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = | pd.read_csv(LUNA_ANNOTATIONS) | pandas.read_csv |
from pandas import DataFrame
from numpy import where
from warnings import warn
class Compare:
"""
This class compares 2 data frames.
It will show you what has been added, removed, and altered.
This will be output in a dictionary object for use.
"""
def __init__(
self, old_df: DataFrame, new_df: DataFrame, comparison_values: bool = False
) -> None:
"""
old_df: what the df looked like
new_df: what the df changed to
"""
self.df1 = old_df
self.df2 = new_df
# index check to ensure code will run properly
self.__check_index()
# this variable represents if we would like to compare
# the output of the compare function
# this currently only works with numerical (float/int)
# values. For that reason, it defaults to False
# but can be changed using .set_change_comparison()
self.comparison_values = comparison_values
# find which columns were added/removed
# column based comparison
# assign the following variables as lists
self.added_cols = None
self.removed_cols = None
self._column_differences()
# find which rows were added/removed
# index based comparison
self.remove = self.removed()["REMOVED"]
self.add = self.added()["ADDED"]
# assign cleaned versions to compare
self.clean_df1 = (
self.df1.loc[~self.df1.index.isin(self.remove)].copy().sort_index()
)
self.clean_df2 = (
self.df2.loc[~self.df2.index.isin(self.add)].copy().sort_index()
)
# remove column discrepancies
if self.removed_cols is not None:
self.clean_df1.drop(columns=self.removed_cols, inplace=True)
if self.added_cols is not None:
self.clean_df2.drop(columns=self.added_cols, inplace=True)
# after everything has been cleaned, compare the dfs
self.compare()
def __check_index(self) -> None:
# check they are the same type
if self.df1.index.dtype != self.df2.index.dtype:
raise ValueError(
"Your indexes are not the same type. "
"Please re-initalize the class with 2 DataFrames "
"that have the same index"
)
# check they are the same name (only a warning)
if self.df1.index.name != self.df2.index.name:
warn(
"Your indexes are not the same name, please ensure "
"they are the same unique identifier. "
"You may experience strange output",
category=RuntimeWarning,
stacklevel=2,
)
def _column_differences(self) -> None:
self.added_cols = [x for x in self.df2.columns if x not in self.df1.columns]
self.removed_cols = [x for x in self.df1.columns if x not in self.df2.columns]
def set_change_comparison(self, change_value: bool) -> None:
self.comparison_values = change_value
def compare(self) -> DataFrame:
"""
COMPARE 2 pd.dfs and returns the index & columns as well as what changed.
Indexes must be matching same length/type
Based on
https://stackoverflow.com/questions/17095101/
compare-two-dataframes-and-output-their-differences-side-by-side
"""
try:
ne_stacked = (self.clean_df1 != self.clean_df2).stack()
changed = ne_stacked[ne_stacked]
changed.index.names = self.clean_df1.index.names + ["Column"]
difference_locations = where(self.clean_df1 != self.clean_df2)
except ValueError:
raise ValueError(
"Please make sure your Indexes are named the same, and the same type"
)
changed_from = self.clean_df1.values[difference_locations]
changed_to = self.clean_df2.values[difference_locations]
final = | DataFrame({"from": changed_from, "to": changed_to}, index=changed.index) | pandas.DataFrame |
#!/usr/bin/env python
################################################################################
# SETUP
################################################################################
# Load required modules
import sys, os, argparse, logging, pandas as pd, numpy as np, json
from sklearn.model_selection import *
# Load our modules
from models import MODEL_NAMES, init_model, RF, EN, FEATURE_CLASSES
from metrics import compute_metrics, RMSE, MAE, MSE
from i_o import getLogger
# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ff', '--feature_file', type=str, required=True)
parser.add_argument('-fcf', '--feature_class_file', type=str, required=True)
parser.add_argument('-of', '--outcome_file', type=str, required=True)
parser.add_argument('-op', '--output_prefix', type=str, required=True)
parser.add_argument('-m', '--model', type=str, required=True, choices=MODEL_NAMES)
parser.add_argument('-mi', '--max_iter', type=int, required=False,
default=1000000,
help='ElasticNet only. Default is parameter used for the paper submission.')
parser.add_argument('-t', '--tol', type=float, required=False,
default=1e-7,
help='ElasticNet only. Default is parameter used for the paper submission.')
parser.add_argument('-v', '--verbosity', type=int, required=False, default=logging.INFO)
parser.add_argument('-nj', '--n_jobs', type=int, default=1, required=False)
parser.add_argument('-efc', '--excluded_feature_classes', type=str, required=False,
nargs='*', default=[], choices=FEATURE_CLASSES)
parser.add_argument('-rs', '--random_seed', type=int, default=12345, required=False)
args = parser.parse_args(sys.argv[1:])
# Set up logger
logger = getLogger(args.verbosity)
# Load the input data
X = pd.read_csv(args.feature_file, index_col=0, sep='\t')
y = pd.read_csv(args.outcome_file, index_col=0, sep='\t')
feature_classes = | pd.read_csv(args.feature_class_file, index_col=0, sep='\t') | pandas.read_csv |
import os
import datetime
import backtrader as bt
import pandas as pd
from matplotlib import pyplot as plt
from S1_task1 import *
obj = CryptoCompare('BTC', 'USDT', 'binance')
obj.download_histohour('2020-01-01', '2020-04-01')
# declare all environment params / global variables
datadir = 'E:\Yiru Xiong-Professional\实习\CryptoAlgoWheel\Month1\S2\data' # data path
logdir = 'E:\Yiru Xiong-Professional\实习\CryptoAlgoWheel\Month1\S2\log' # log path
reportdir = 'E:\Yiru Xiong-Professional\实习\CryptoAlgoWheel\Month1\S2\\report' # report path
datafile = 'BTC_USDT_1h.csv' # data file
from_datetime = '2020-01-01 00:00:00' # start time
to_datetime = '2020-04-01 00:00:00' # end time
logfile = 'BTC_USDT_1h_EMACross_10_20_2020-01-01_2020-04-01.csv'
figfile = 'BTC_USDT_1h_EMACross_10_20_2020-01-01_2020-04-01.png'
# define strategy class
class EMACross(bt.Strategy):
params = (
('pfast', 10),
('pslow', 20),
)
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
# Add the fast and slow ExponentialMovingAverage indicators
self.emafast = bt.indicators.ExponentialMovingAverage(
self.datas[0], period=self.params.pfast)
self.emaslow = bt.indicators.ExponentialMovingAverage(
self.datas[0], period=self.params.pslow)
def log(self, txt, dt=None):
# minimal logging helper; the original excerpt calls self.log() without defining it
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def next(self):
# read the current fast/slow EMA values computed in __init__ instead of slicing
# self.dataclose, which is a backtrader line and cannot be indexed with ['close']
ma_nfast = self.emafast[0]
ma_nslow = self.emaslow[0]
self.log('Close, %.2f' % self.dataclose[0])
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if ma_nfast > ma_nslow:
self.log('BUY CREATE, %.2f' % self.dataclose[0])
self.order = self.buy()
else:
if ma_nfast < ma_nslow:
self.log('SELL CREATE, %.2f' % self.dataclose[0])
self.order = self.sell()
if __name__ == '__main__':
# initiate cerebro instance:
cerebro = bt.Cerebro()
# feed data:
data = pd.read_csv(os.path.join(datadir, datafile),
index_col='datetime', parse_dates=True)
data = data.loc[
(data.index >= pd.to_datetime(from_datetime)) &
(data.index <= | pd.to_datetime(to_datetime) | pandas.to_datetime |
import pandas as pd
import glob
import os
import configargparse as argparse
from net_benefit_ascvd.prediction_utils.util import df_dict_concat
parser = argparse.ArgumentParser(
config_file_parser_class=argparse.YAMLConfigFileParser,
)
parser.add_argument(
"--project_dir",
type=str,
required=True
)
parser.add_argument("--task_prefix", type=str, required=True)
parser.add_argument(
"--selected_config_experiment_suffix",
type=str,
default="selected",
)
if __name__ == "__main__":
args = parser.parse_args()
project_dir = args.project_dir
task_prefix = args.task_prefix
def get_config_df(experiment_name):
config_df_path = os.path.join(
os.path.join(
project_dir, "experiments", experiment_name, "config", "config.csv"
)
)
config_df = pd.read_csv(config_df_path)
config_df["config_filename"] = config_df.id.astype(str) + ".yaml"
return config_df
def get_result_df(
experiment_name, output_filename="result_df_group_standard_eval.parquet"
):
baseline_files = glob.glob(
os.path.join(
project_dir, "experiments", experiment_name, "**", output_filename
),
recursive=True,
)
assert len(baseline_files) > 0
baseline_df_dict = {
tuple(file_name.split("/"))[-2]: | pd.read_parquet(file_name) | pandas.read_parquet |
import sys
import copy
import math
import pathlib
import json
import os
import torch
# import joblib
import logging
import numpy as np
import pandas as pd
import hydra
from omegaconf import DictConfig, OmegaConf
import student
import tutor
import utils
logger = logging.getLogger(__name__)
@hydra.main(config_path="config", config_name="config")
def main(cfg : DictConfig) -> None:
save_root = os.getcwd()
current_dir = pathlib.Path(hydra.utils.get_original_cwd())
num_total_ses = cfg.days * cfg.num_sessions_per_day
session_interval = 1 * 24 * 60 * 60 // cfg.num_sessions_per_day
best_score = None
AGENT_FOLDER = os.path.abspath(
os.path.join(save_root, "logs/tutor")
)
STUDENT_LOG_FOLDER = os.path.abspath(
os.path.join(save_root, "logs/study_log")
)
os.makedirs(STUDENT_LOG_FOLDER, exist_ok=True)
RLTUTOR_STUDENT_LOG_FOLDER = STUDENT_LOG_FOLDER + "/RLTutor"
RANDOM_STUDENT_LOG_FOLDER = STUDENT_LOG_FOLDER + "/Random"
LEITNERTUTOR_STUDENT_LOG_FOLDER = STUDENT_LOG_FOLDER + "/LeitnerTutor"
THRESHOLDTUTOR_STUDENT_LOG_FOLDER = STUDENT_LOG_FOLDER + "/ThresholdTutor"
os.makedirs(RLTUTOR_STUDENT_LOG_FOLDER, exist_ok=True)
os.makedirs(RANDOM_STUDENT_LOG_FOLDER, exist_ok=True)
os.makedirs(LEITNERTUTOR_STUDENT_LOG_FOLDER, exist_ok=True)
os.makedirs(LEITNERTUTOR_STUDENT_LOG_FOLDER + "/best_aprob", exist_ok=True)
os.makedirs(THRESHOLDTUTOR_STUDENT_LOG_FOLDER, exist_ok=True)
os.makedirs(THRESHOLDTUTOR_STUDENT_LOG_FOLDER + "/best_thresh", exist_ok=True)
item_skill_mat = utils.make_item_skill_mat(cfg.num_items, cfg.skills, cfg.seed)
pd.DataFrame(item_skill_mat).to_csv(
os.path.join(save_root, "logs/item_%sskill_mat.csv" % str(cfg.skills)),
index=False,
header=False,
)
### 1. RLTutor Tutoring ######
logger.info("Start RLTuor Tutoring.")
now = 0
reward_list_for_plot = []
learned_weight_folder = os.path.join(current_dir, "data/pretrained_weight.pkl")
time_weight = utils.make_time_weight(
num_skills=cfg.skills,
folder_path=learned_weight_folder,
seed=cfg.seed,
)
model = student.DAS3HStudent(time_weight, cfg.num_items, cfg.skills, cfg.seed)
student_model = student.StudentModel(
n_items=cfg.num_items,
n_skills=cfg.skills,
n_wins=cfg.time_windows,
seed=cfg.seed,
item_skill_mat=item_skill_mat,
model=model,
)
def tutoring(num_questions, init_instruction, agent, now):
actions = []
outcomes = []
times = []
each_recall_probs = []
obs = student_model._vectorized_obs()
if init_instruction and agent.name != "RLTutor":
first_actions = pd.read_csv(
RLTUTOR_STUDENT_LOG_FOLDER + "/first_instruction.csv"
)["action"].values.tolist()
assert len(first_actions) == num_questions
for i in range(num_questions):
if i != 0:
now += cfg.interval
if init_instruction and agent.name == "RLTutor":
action = np.random.randint(cfg.num_items)
elif init_instruction and agent.name != "RLTutor":
action = first_actions[i]
else:
if agent.name == "RLTutor":
action = agent.act(obs)
elif agent.name == "LeitnerTutor":
action = agent.act(student_model.curr_item, student_model.curr_outcome)
elif agent.name == "ThresholdTutor":
inner_now = student_model.now
student_model.now = now
action = agent.act()
student_model.now = inner_now
elif agent.name == "RandomTutor":
action = agent.act()
outcome, obs = student_model.step(action, now)
each_recall_prob = student_model.get_retention_rate()
actions.append(action)
outcomes.append(outcome)
times.append(now)
each_recall_probs.append(each_recall_prob)
df = pd.DataFrame(
{
"learner": 1,
"action": actions,
"time": times,
"outcome": outcomes,
}
)
return df, each_recall_probs, now
inner_model = student.InnerModel(
n_items=cfg.num_items,
n_wins=cfg.time_windows,
max_steps=cfg.steps_per_updates,
seed=cfg.seed,
n_items_for_sessions=cfg.num_items_for_each_session,
delay_in_each_session=cfg.interval,
isi=session_interval,
item_skill_mat=item_skill_mat,
result_folder=os.path.join(AGENT_FOLDER, "inner_model"),
lr=cfg.inner_lr,
coef_for_loss_fn=cfg.coefs,
log_prob=cfg.log_reward,
)
rl_tutor = tutor.RLTutor(
env=inner_model,
num_iteration=cfg.updates,
num_envs=cfg.parallel_envs,
num_timesteps=cfg.steps_per_updates,
seed=cfg.seed,
gamma=cfg.gamma,
lambd=cfg.lambd,
value_func_coef=cfg.value_func_coef,
entropy_coef=cfg.entropy_coef,
clip_eps=cfg.clip_eps,
)
with open(STUDENT_LOG_FOLDER + "/student_model_parameters.json", "w") as f:
f.write(
json.dumps(
{
"alpha": student_model.predictor.alpha.tolist(),
"delta": student_model.predictor.delta.tolist(),
"beta": student_model.predictor.beta.tolist(),
"correct": np.vsplit(
student_model.predictor.time_weight.reshape(-1, 5), 2
)[0].tolist(),
"attempt": np.vsplit(
student_model.predictor.time_weight.reshape(-1, 5), 2
)[1].tolist(),
"h": student_model.predictor.h,
"d": student_model.predictor.d,
},
indent=4,
)
)
df, retention_rate_list, now = tutoring(
num_questions=cfg.num_items_for_pre_test, init_instruction=True, agent=rl_tutor, now=now
)
reward_list_for_plot += retention_rate_list
df.to_csv(RLTUTOR_STUDENT_LOG_FOLDER + "/first_instruction.csv", index=False)
df.to_csv(RLTUTOR_STUDENT_LOG_FOLDER + "/study_log.csv", index=False)
inner_model.inner_modeling(
session_log_csv_path=RLTUTOR_STUDENT_LOG_FOLDER + "/first_instruction.csv",
log_csv_path=RLTUTOR_STUDENT_LOG_FOLDER + "/study_log.csv",
)
inner_model.init_session_time = now
train_reward_log = np.zeros((num_total_ses, cfg.updates))
for n in range(num_total_ses):
print()
train_reward_log[n, :] = rl_tutor.train(
session_num=n,
output_dir=os.path.join(AGENT_FOLDER, "rltutor"),
logger=logger,
lr=cfg.lr,
)
print()
now += session_interval
df, retention_rate_list, now = tutoring(
num_questions=cfg.num_items_for_each_session, init_instruction=False, agent=rl_tutor, now=now
)
reward_list_for_plot += retention_rate_list
df.to_csv(RLTUTOR_STUDENT_LOG_FOLDER + "/instruction_%s.csv" % n, index=False)
df.to_csv(
RLTUTOR_STUDENT_LOG_FOLDER + "/study_log.csv",
index=False,
mode="a",
header=False,
)
inner_model.inner_modeling(
session_log_csv_path=RLTUTOR_STUDENT_LOG_FOLDER + "/instruction_%s.csv" % n,
log_csv_path=RLTUTOR_STUDENT_LOG_FOLDER + "/study_log.csv",
)
inner_model.init_session_time = now
state_dict = inner_model.model.state_dict()
with open(STUDENT_LOG_FOLDER + "/RL_inner_model_final_parameters.json", "w") as f:
f.write(
json.dumps(
{
"alpha": state_dict["alpha"].numpy().squeeze().tolist(),
"delta": state_dict["delta"].numpy().squeeze().tolist(),
"beta": state_dict["beta"].numpy().squeeze().tolist(),
"correct": state_dict["correct"].numpy().squeeze().tolist(),
"attempt": state_dict["attempt"].numpy().squeeze().tolist(),
},
indent=4,
)
)
column = ["item%s" % str(n + 1) for n in range(cfg.num_items)]
result_df = | pd.DataFrame(reward_list_for_plot, columns=column) | pandas.DataFrame |
import requests
import json
import traceback
import sqlite3
import server.app.decode_fbs as decode_fbs
import scanpy as sc
import anndata as ad
import pandas as pd
import numpy as np
import diffxpy.api as de
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import matplotlib.patches as mpatches
from matplotlib import rcParams
import plotly.graph_objects as go
import plotly.io as plotIO
import base64
import math
from io import BytesIO
import sys
import time
import os
import re
import glob
import subprocess
strExePath = os.path.dirname(os.path.abspath(__file__))
import pprint
ppr = pprint.PrettyPrinter(depth=6)
import server.compute.diffexp_generic as diffDefault
import pickle
from pyarrow import feather
sys.setrecursionlimit(10000)
sc.settings.verbosity = 2
rcParams.update({'figure.autolayout': True})
api_version = "/api/v0.2"
import threading
jobLock = threading.Lock()
def getLock(lock):
while not lock.acquire():
time.sleep(1.0)
def freeLock(lock):
lock.release()
def route(data,appConfig):
#ppr.pprint("current working dir:%s"%os.getcwd())
data = initialization(data,appConfig)
#ppr.pprint(data)
try:
getLock(jobLock)
taskRes = distributeTask(data["method"])(data)
freeLock(jobLock)
return taskRes
except Exception as e:
freeLock(jobLock)
return 'ERROR @server: '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))
#return distributeTask(data["method"])(data)
import server.app.app as app
def initialization(data,appConfig):
# obtain the server host information
data = json.loads(str(data,encoding='utf-8'))
# update the environment information
data.update(VIPenv)
# updating the hosted data information
if appConfig.is_multi_dataset():
data["url_dataroot"]=appConfig.server_config.multi_dataset__dataroot['d']['base_url']
data['h5ad']=os.path.join(appConfig.server_config.multi_dataset__dataroot['d']['dataroot'], data["dataset"])
else:
data["url_dataroot"]=None
data["dataset"]=None
data['h5ad']=appConfig.server_config.single_dataset__datapath
# setting the plotting options
if 'figOpt' in data.keys():
setFigureOpt(data['figOpt'])
# get the var (gene) and obv index
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
data['obs_index'] = scD.get_schema()["annotations"]["obs"]["index"]
data['var_index'] = scD.get_schema()["annotations"]["var"]["index"]
return data
def setFigureOpt(opt):
sc.set_figure_params(dpi_save=int(opt['dpi']),fontsize= float(opt['fontsize']),vector_friendly=(opt['vectorFriendly'] == 'Yes'),transparent=(opt['transparent'] == 'Yes'),color_map=opt['colorMap'])
rcParams.update({'savefig.format':opt['img']})
def getObs(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the category annotation
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
selAnno = [data['obs_index']]+data['grp']
dAnno = list(scD.get_obs_keys())
anno = []
sel = list(set(selAnno)&set(dAnno))
if len(sel)>0:
tmp = scD.data.obs.loc[selC,sel].astype('str')
tmp.index = cNames
anno += [tmp]
sel = list(set(selAnno)-set(dAnno))
if len(sel)>0:
annotations = scD.dataset_config.user_annotations
if annotations:
labels = annotations.read_labels(scD)
tmp = labels.loc[list(scD.data.obs.loc[selC,data['obs_index']]),sel]
tmp.index = cNames
anno += [tmp]
obs = pd.concat(anno,axis=1)
#ppr.pprint(obs)
## update the annotation Abbreviation
combUpdate = cleanAbbr(data)
if 'abb' in data.keys():
for i in data['grp']:
obs[i] = obs[i].map(data['abb'][i])
return combUpdate, obs
def getObsNum(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the category annotation
obs = pd.DataFrame()
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
selAnno = data['grpNum']
dAnno = list(scD.get_obs_keys())
sel = list(set(selAnno)&set(dAnno))
if len(sel)>0:
obs = scD.data.obs.loc[selC,sel]
obs.index = cNames
return obs
def getVar(data):
## obtain the gene annotation
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
gInfo = scD.data.var
gInfo.index = list(gInfo[data['var_index']])
gInfo = gInfo.drop([data['var_index']],axis=1)
return gInfo
def collapseGeneSet(data,expr,gNames,cNames,fSparse):
Y = expr
if 'geneGrpColl' in data.keys() and not data['geneGrpColl']=='No' and 'geneGrp' in data.keys() and len(data['geneGrp'])>0:
data['grpLoc'] = []
data['grpID'] = []
if fSparse:
Y = pd.DataFrame.sparse.from_spmatrix(Y,columns=gNames,index=cNames)
for aN in data['geneGrp'].keys():
if data['geneGrpColl']=='mean':
Y = pd.concat([Y,Y[data['geneGrp'][aN]].mean(axis=1).rename(aN)],axis=1,sort=False)
if data['geneGrpColl']=='median':
Y = pd.concat([Y,Y[data['geneGrp'][aN]].median(axis=1).rename(aN)],axis=1,sort=False)
for gene in data['geneGrp'][aN]:
if gene in data['genes']:
data['genes'].remove(gene)
data['genes'] += [aN]
gNames = list(Y.columns)
return Y,gNames
def createData(data):
selC = list(data['cells'].values())
cNames = ["cell%d" %i for i in selC]
## obtain the expression matrix
gNames = []
expr = []
fSparse = False
X = []
if 'genes' in data.keys():
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
if not type(scD.data.X) is np.ndarray:
fSparse = True
if len(data['genes'])>0:
fullG = list(scD.data.var[data['var_index']])
selG = sorted([fullG.index(i) for i in data['genes']]) #when data loaded backed, incremental is required
X = scD.data.X[:,selG]
gNames = [fullG[i] for i in selG] #data['genes']
else:
X = scD.data.X
gNames = list(scD.data.var[data['var_index']])
if 'figOpt' in data.keys() and data['figOpt']['scale'] == 'Yes':
X = sc.pp.scale(X,zero_center=(data['figOpt']['scaleZero'] == 'Yes'),max_value=(float(data['figOpt']['scaleMax']) if data['figOpt']['clipValue']=='Yes' else None))
X = X[selC]
if fSparse:
expr = X
else:
expr = pd.DataFrame(X,columns=gNames,index=cNames)
expr,gNames = collapseGeneSet(data,expr,gNames,cNames,fSparse)
#ppr.pprint("finished expression ...")
## obtain the embedding
embed = {}
if 'layout' in data.keys():
layout = data['layout']
if isinstance(layout,str):
layout = [layout]
if len(layout)>0:
for one in layout:
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
embed['X_%s'%one] = pd.DataFrame(scD.data.obsm['X_%s'%one][selC][:,[0,1]],columns=['%s1'%one,'%s2'%one],index=cNames)
#ppr.pprint("finished layout ...")
## obtain the category annotation
combUpdate, obs = getObs(data)
## create a custom annotation category and remove cells which are not in the selected annotation
if combUpdate and len(data['grp'])>1:
newGrp = 'Custom_combine'
combineGrp = list(data['combine'].keys());
obs[newGrp] = obs[combineGrp[0]]
for i in combineGrp:
if not i==combineGrp[0]:
obs[newGrp] += ":"+obs[i]
selC = ~obs[newGrp].str.contains("Other").to_numpy()
expr = expr[selC]
for i in embed.keys():
embed[i] = embed[i][selC]
obs = obs[selC].astype('category')
obs[newGrp].cat.set_categories(data['combineOrder'],inplace=True)
data['grp'] = [newGrp]
obs = obs.astype('category')
## empty selection
if expr.shape[0]==0 or expr.shape[1]==0:
return []
#ppr.pprint("finished obv ...")
return sc.AnnData(expr,obs,var=pd.DataFrame([],index=gNames),obsm={layout:embed[layout].to_numpy() for layout in embed.keys()})
def cleanAbbr(data):
updated = False
if 'abb' in data.keys() and 'combine' in data.keys():
if len(data['combine'])>0:
updated = True
for cate in data['abb'].keys():
if cate in data['combine'].keys():
for anName in data['abb'][cate].keys():
if not anName in data['combine'][cate]:
data['abb'][cate][anName] = "Other";
else:
if not data['abb'][cate][anName]==anName:
data['combineOrder'] = [one.replace(anName,data['abb'][cate][anName]) for one in data['combineOrder']]
else:
data['abb'][cate] = {key:"Other" for key in data['abb'][cate].keys()}
return updated
def errorTask(data):
raise ValueError('Error task!')
def distributeTask(aTask):
return {
'SGV':SGV,
'SGVcompare':SGVcompare,
'PGV':PGV,
'VIOdata':VIOdata,
'HEATplot':pHeatmap,
'HEATdata':HeatData,
'GD':GD,
'DEG':DEG,
'DOT':DOT,
'EMBED':EMBED,
'TRAK':TRACK,
'DUAL':DUAL,
'MARK': MARK,
'MINX':MINX,
'DENS':DENS,
'DENS2D':DENS2D,
'SANK':SANK,
'STACBAR':STACBAR,
'HELLO':HELLO,
'CLI':CLI,
'preDEGname':getPreDEGname,
'preDEGvolcano':getPreDEGvolcano,
'preDEGmulti':getPreDEGbubble,
'mergeMeta': mergeMeta,
'isMeta': isMeta,
'testVIPready':testVIPready,
'Description':getDesp,
'GSEAgs':getGSEA,
'SPATIAL':SPATIAL,
'saveTest':saveTest,
'getBWinfo':getBWinfo,
'plotBW':plotBW
}.get(aTask,errorTask)
def HELLO(data):
return 'Hi, connected.'
def iostreamFig(fig):
#getLock(iosLock)
figD = BytesIO()
#ppr.pprint('io located at %d'%int(str(figD).split(" ")[3].replace(">",""),0))
fig.savefig(figD,bbox_inches="tight")
#ppr.pprint(sys.getsizeof(figD))
#ppr.pprint('io located at %d'%int(str(figD).split(" ")[3].replace(">",""),0))
imgD = base64.encodebytes(figD.getvalue()).decode("utf-8")
figD.close()
#ppr.pprint("saved Fig")
#freeLock(iosLock)
if 'matplotlib' in str(type(fig)):
plt.close(fig)#'all'
return imgD
def Msg(msg):
fig = plt.figure(figsize=(5,2))
plt.text(0,0.5,msg)
ax = plt.gca()
ax.axis('off')
return iostreamFig(fig)
def SPATIAL(data):
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
#ppr.pprint(vars(scD.data.uns["spatial"]))
spatial=scD.data.uns["spatial"]
if (data['embedding'] == "get_spatial_list"):
return json.dumps({'list':list(spatial)})
library_id=list(spatial)[0]
if (data['embedding'] in list(spatial)):
library_id=data['embedding']
height, width, depth = spatial[library_id]["images"][data['resolution']].shape
embedding = 'X_'+data['embedding']
spatialxy = scD.data.obsm[embedding]
tissue_scalef = spatial[library_id]['scalefactors']['tissue_' + data['resolution'] + '_scalef']
i = data['spots']['spoti_i']
x = 0
y = 1
# from original embedding to (0,1) coordinate system (cellxgene embedding)
scalex = (data['spots']['spot0_x'] - data['spots']['spoti_x']) / (spatialxy[0][x] - spatialxy[i][x])
scaley = (data['spots']['spot0_y'] - data['spots']['spoti_y']) / (spatialxy[0][y] - spatialxy[i][y])
# image is in (-1,0,1) coordinate system, so multiplied by 2
translatex = (spatialxy[i][x]*scalex - data['spots']['spoti_x']) * 2
translatey = (spatialxy[i][y]*scaley - data['spots']['spoti_y']) * 2
scale = 1/tissue_scalef * scalex * 2
# Addtional translate in Y due to flipping of the image if needed
ppr.pprint(scalex)
ppr.pprint(scaley)
ppr.pprint(translatex)
ppr.pprint(translatey)
# from (-1,0,1) (image layer) to (0,1) coordinate system (cellxgene embedding). Overlapping (0,0) origins of both.
translatex = -(1+translatex)
if (translatey > -0.1):
flip = True
translatey = -(1+translatey) + height*scale
else:
flip = False
translatey = -(1+translatey)
returnD = [{'translatex':translatex,'translatey':translatey,'scale':scale}]
dpi=100
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
if (flip):
ax.imshow(np.flipud(spatial[library_id]["images"][data['resolution']]), interpolation='nearest')
else:
ax.imshow(spatial[library_id]["images"][data['resolution']], interpolation='nearest')
figD = BytesIO()
plt.savefig(figD, dpi=dpi)
ppr.pprint(sys.getsizeof(figD))
imgD = base64.encodebytes(figD.getvalue()).decode("utf-8")
figD.close()
plt.close(fig)
return json.dumps([returnD, imgD])
def MINX(data):
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
minV = min(scD.data.X[0])
return '%.1f'%minV
def geneFiltering(adata,cutoff,opt):
## 1. remove cells if the max expression of all genes is lower than the cutoff
if opt==1:
#sT = time.time()
#ix = adata.to_df().apply(lambda x: max(x)>float(cutoff),axis=1)
#ppr.pprint(time.time()-sT)
#sT=time.time()
df = adata.to_df()
ix = df[df>float(cutoff)].count(axis=1)>0
#ppr.pprint(time.time()-sT)
#sT = time.time()
#ix = pd.DataFrame((adata.X>float(cutoff)).sum(1)>0,index=list(adata.obs.index)).iloc[:,0]
#ppr.pprint(time.time()-sT)
adata = adata[ix,]
## 2. Set all expression levels smaller than the cutoff to NaN so they are not plotted, without removing any cells
elif opt==2:
def apply_cutoff(x):
# renamed so the nested function does not shadow the 'cutoff' argument it needs to read
return x if x>float(cutoff) else None
X = adata.to_df()
X = X.applymap(apply_cutoff)
adata = sc.AnnData(X,adata.obs)
return adata
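# --- Editor's illustration (not used by the server): the two filtering modes of
# --- geneFiltering above, shown on a tiny made-up expression table.
def _gene_filtering_demo(cutoff=1.0):
    toy = pd.DataFrame({"geneA": [0.2, 3.0], "geneB": [0.5, 0.1]}, index=["cell1", "cell2"])
    keep_cells = toy[toy > cutoff].count(axis=1) > 0   # mode 1: drop cells with no gene above the cutoff
    masked = toy.where(toy > cutoff)                   # mode 2: keep every cell, blank low values to NaN
    return toy[keep_cells], masked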
def SGV(data):
# figure width and height depend on the number of unique categories,
# the character length of category names, and the number of genes
#ppr.pprint("SGV: creating data ...")
adata = createData(data)
#ppr.pprint("SGV: data created ...")
adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
a = list(set(list(adata.obs[data['grp'][0]])))
ncharA = max([len(x) for x in a])
w = len(a)/4+1
h = ncharA/6+2.5
ro = math.acos(10/max([15,ncharA]))/math.pi*180
##
fig = plt.figure(figsize=[w,h])
sc.pl.violin(adata,data['genes'],groupby=data['grp'][0],ax=fig.gca(),show=False)
fig.autofmt_xdate(bottom=0.2,rotation=ro,ha='right')
return iostreamFig(fig)
def SGVcompare(data):
adata = createData(data)
#adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
# plot in R
strF = ('%s/SGV%f.csv' % (data["CLItmp"],time.time()))
X=pd.concat([adata.to_df(),adata.obs[data['grp']]],axis=1,sort=False)
X[X.iloc[:,0]>=float(data['cellCutoff'])].to_csv(strF,index=False)
strCMD = " ".join(["%s/Rscript"%data['Rpath'],strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']])
#ppr.pprint(strCMD)
res = subprocess.run([strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#
img = res.stdout.decode('utf-8')
os.remove(strF)
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in R: "+res.stderr.decode('utf-8'))
return img
def VIOdata(data):
adata = createData(data)
adata = geneFiltering(adata,data['cutoff'],1)
if len(adata)==0:
raise ValueError('No cells in the condition!')
return pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
def unique(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def updateGene(data):
grpID = []
grpLoc=[]
allG = []
if 'geneGrp' in data.keys():
for aN in data['geneGrp'].keys():
grpLoc += [(len(allG),len(allG)+len(data['geneGrp'][aN])-1)]
allG += data['geneGrp'][aN]
grpID += [aN]
data['genes'] = unique(allG+data['genes'])
data['grpLoc'] = grpLoc
data['grpID'] = grpID
def PGV(data):
# figure width and height depend on the number of unique categories,
# the character length of category names, and the number of genes  #pecam1 pdpn
updateGene(data)
#ppr.pprint("PGV: creating data ...")
adata = createData(data)
#ppr.pprint("PGV: data created ...")
adata = geneFiltering(adata,data['cutoff'],1)
if adata.shape[0]==0 or adata.shape[1]==0:
return Msg('No cells in the condition!')
a = list(set(list(adata.obs[data['grp'][0]])))
ncharA = max([len(x) for x in a])
w = max([3,ncharA/8])+len(data['genes'])/2+1.5
h = len(a)+0.5
swapAx = False
##
if data['by']=='Columns':
a = w
w = h
h = a
swapAx = True
if 'split_show' in data['figOpt']['scanpybranch']: #.dev140+ge9cbc5f
vp = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],return_fig=True,figsize=(w,h),swap_axes=swapAx,var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])
vp.add_totals().style(yticklabels=True, cmap=data['color']).show()
#vp.add_totals().show()
fig = vp#plt.gcf()
else:
fig = plt.figure(figsize=[w,h])
axes = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],show=False,ax=fig.gca(),swap_axes=swapAx,
var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])
return iostreamFig(fig)
def pHeatmap(data):
# figure width depends on the number of categories chosen to show
# and the character length of each category term
# if the number of elements in a category is smaller than 10, "Set1" or "Set3" is chosen
# if the number of elements in a category is between 10 and 20, the default palette is chosen
# if the number of elements in a category is larger than 20, husl is chosen
#Xsep = createData(data,True)
#adata = sc.AnnData(Xsep['expr'],Xsep['obs'])
#sT = time.time()
adata = createData(data)
data['grp'] += data['addGrp']
#Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
#ppr.pprint('HEAT data reading cost %f seconds' % (time.time()-sT) )
#sT = time.time()
exprOrder = True
if data['order']!="Expression":
exprOrder = False;
adata = adata[adata.obs.sort_values(data['order']).index,]
#s = adata.obs[data['order']]
#ix = sorted(range(len(s)), key=lambda k: s[k])
#adata = adata[ix,]
colCounter = 0
colName =['Set1','Set3']
grpCol = list()
grpLegend = list()
grpWd = list()
grpLen = list()
h = 8
w = len(data['genes'])/3+0.3
for gID in data['grp']:
grp = adata.obs[gID]
Ugrp = grp.unique()
if len(Ugrp)<10:
lut = dict(zip(Ugrp,sns.color_palette(colName[colCounter%2],len(Ugrp)).as_hex()))
colCounter += 1
elif len(Ugrp)<20:
lut = dict(zip(Ugrp,sns.color_palette(n_colors=len(Ugrp)).as_hex()))
else:
lut = dict(zip(Ugrp,sns.color_palette("husl",len(Ugrp)).as_hex()))
grpCol.append(grp.map(lut))
grpLegend.append([mpatches.Patch(color=v,label=k) for k,v in lut.items()])
grpWd.append(max([len(x) for x in Ugrp]))#0.02*fW*max([len(x) for x in Ugrp])
grpLen.append(len(Ugrp)+2)
w += 2
Zscore=None
heatCol=data['color']
heatCenter=None
colTitle="Expression"
if data['norm']=='zscore':
Zscore=1
#heatCol="vlag"
heatCenter=0
colTitle="Z-score"
#ppr.pprint('HEAT data preparing cost %f seconds' % (time.time()-sT) )
#sT = time.time()
try:
g = sns.clustermap(adata.to_df(),
method="ward",row_cluster=exprOrder,z_score=Zscore,cmap=heatCol,center=heatCenter,
row_colors=pd.concat(grpCol,axis=1).astype('str'),yticklabels=False,xticklabels=True,
figsize=(w,h),colors_ratio=0.05,
cbar_pos=(.3, .95, .55, .02),
cbar_kws={"orientation": "horizontal","label": colTitle,"shrink": 0.5})
except Exception as e:
return 'ERROR: Z-score calculation failed due to a standard deviation of 0. '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))
#ppr.pprint('HEAT plotting cost %f seconds' % (time.time()-sT) )
#sT = time.time()
g.ax_col_dendrogram.set_visible(False)
#g.ax_row_dendrogram.set_visible(False)
plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
grpW = [1.02]
grpH = [1.2]
cumulaN = 0
cumulaMax = 0
characterW=1/40 # a character is 1/40 of heatmap width
characterH=1/40 # a character is 1/40 of heatmap height
for i in sorted(range(len(grpLen)),key=lambda k:grpLen[k]):#range(5):#
cumulaN += grpLen[i]
if cumulaN>(10+1/characterH):
grpW.append(grpW[-1]+cumulaMax)
grpH = [1.2]
cumulaN =0
cumulaMax=0
leg = g.ax_heatmap.legend(handles=grpLegend[i],frameon=True,title=data['grp'][i],loc="upper left",
bbox_to_anchor=(grpW[-1],grpH[-1]),fontsize=5)#grpW[i],0.5,0.3
#leg = g.ax_heatmap.legend(handles=grpLegend[0],frameon=True,title=data['grp'][0],loc="upper left",
# bbox_to_anchor=(1.02,1-i*0.25),fontsize=5)#grpW[i],0.5,0.
cumulaMax = max([cumulaMax,grpWd[i]*characterW])
grpH.append(grpH[-1]-grpLen[i]*characterH)
leg.get_title().set_fontsize(6)#min(grpSize)+2
g.ax_heatmap.add_artist(leg)
#ppr.pprint('HEAT post plotting cost %f seconds' % (time.time()-sT) )
return iostreamFig(g)#json.dumps([iostreamFig(g),Xdata])#)#
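# --- Editor's note (illustration only): the palette-size rule described in the comments
# --- at the top of pHeatmap, factored out for clarity; not called by the server code.
def _palette_for(categories, alternate="Set1"):
    n = len(categories)
    if n < 10:
        colors = sns.color_palette(alternate, n).as_hex()   # small groups: "Set1"/"Set3"
    elif n < 20:
        colors = sns.color_palette(n_colors=n).as_hex()     # medium groups: default palette
    else:
        colors = sns.color_palette("husl", n).as_hex()      # many groups: husl
    return dict(zip(categories, colors))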
def HeatData(data):
adata = createData(data)
Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()
return Xdata
def GD(data):
adata = None;
for one in data['cells'].keys():
#sT = time.time()
oneD = data.copy()
oneD.update({'cells':data['cells'][one],
'genes':[],
'grp':[]})
D = createData(oneD)
#ppr.pprint("one grp aquire data cost %f seconds" % (time.time()-sT))
D.obs['cellGrp'] = one
if adata is None:
adata = D
else:
#sT =time.time()
adata = adata.concatenate(D)
#ppr.pprint("Concatenate data cost %f seconds" % (time.time()-sT))
if adata is None:
return Msg("No cells were satisfied the condition!")
##
adata.obs.astype('category')
cutOff = 'geneN_cutoff'+data['cutoff']
#sT = time.time()
#adata.obs[cutOff] = adata.to_df().apply(lambda x: sum(x>float(data['cutoff'])),axis=1)
#ppr.pprint(time.time()-sT)
#sT = time.time()
#df = adata.to_df()
#adata.obs[cutOff] = df[df>float(data['cutoff'])].count(axis=1)
#ppr.pprint(time.time()-sT)
sT = time.time()
adata.obs[cutOff] = (adata.X >float(data['cutoff'])).sum(1)
ppr.pprint(time.time()-sT)
##
w = 3
if len(data['cells'])>1:
w += 3
fig = plt.figure(figsize=[w,4])
sc.pl.violin(adata,cutOff,groupby='cellGrp',ax=fig.gca(),show=False,rotation=0,size=2)
return iostreamFig(fig)
def getGSEA(data):
strGSEA = '%s/gsea/'%strExePath
return json.dumps(sorted([os.path.basename(i).replace(".symbols.gmt","") for i in glob.glob(strGSEA+"*.symbols.gmt")]))
def DEG(data):
adata = None;
genes = data['genes']
data['genes'] = []
comGrp = 'cellGrp'
if 'combine' in data.keys():
if data['DEmethod']=='default':
combUpdate, obs = getObs(data)
if combUpdate and len(data['grp'])>1:
obs[comGrp] = obs[data['grp'][0]]
for i in data['grp']:
if i!=data['grp'][0]:
obs[comGrp] += ":"+obs[i]
mask = [obs[comGrp].isin([data['comGrp'][i]]) for i in [0,1]]
else:
data['figOpt']['scale'] = 'No'
adata = createData(data)
comGrp = data['grp'][0]
adata = adata[adata.obs[comGrp].isin(data['comGrp'])]
else:
mask = [pd.Series(range(data['cellN'])).isin(data['cells'][one].values()) for one in data['comGrp']]
for one in data['comGrp']:
oneD = data.copy()
oneD['cells'] = data['cells'][one]
oneD['genes'] = []
oneD['grp'] = []
oneD['figOpt']['scale']='No'
#oneD = {'cells':data['cells'][one],
# 'genes':[],
# 'grp':[],
# 'figOpt':{'scale':'No'},
# 'url':data['url']}
D = createData(oneD)
D.obs[comGrp] = one
if adata is None:
adata = D
else:
adata = adata.concatenate(D)
if data['DEmethod']=='default':
if sum(mask[0]==True)<10 or sum(mask[1]==True)<10:
raise ValueError('Less than 10 cells in a group!')
with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:
res = diffDefault.diffexp_ttest(scD,mask[0].to_numpy(),mask[1].to_numpy(),scD.data.shape[1])# shape[cells as rows, genes as columns]
gNames = list(scD.data.var[data['var_index']])
deg = pd.DataFrame(res,columns=['gID','log2fc','pval','qval'])
gName = pd.Series([gNames[i] for i in deg['gID']],name='gene')
deg = pd.concat([deg,gName],axis=1).loc[:,['gene','log2fc','pval','qval']]
else:
if not 'AnnData' in str(type(adata)):
raise ValueError('No data extracted by user selection')
adata.obs.astype('category')
nm = None
if data['DEmethod']=='wald':
nm = 'nb'
if data['DEmethod']=='wald':
res = de.test.wald(adata,formula_loc="~1+"+comGrp,factor_loc_totest=comGrp)
elif data['DEmethod']=='t-test':
res = de.test.t_test(adata,grouping=comGrp)
elif data['DEmethod']=='rank':
res = de.test.rank_test(adata,grouping=comGrp)
else:
raise ValueError('Unknown DE methods:'+data['DEmethod'])
#res = de.test.two_sample(adata,comGrp,test=data['DEmethod'],noise_model=nm)
deg = res.summary()
deg = deg.sort_values(by=['qval']).loc[:,['gene','log2fc','pval','qval']]
deg['log2fc'] = -1 * deg['log2fc']
## plot in R
#strF = ('/tmp/DEG%f.csv' % time.time())
strF = ('%s/DEG%f.csv' % (data["CLItmp"],time.time()))
deg.to_csv(strF,index=False)
#ppr.pprint([strExePath+'/volcano.R',strF,'"%s"'%';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0]])
res = subprocess.run([strExePath+'/volcano.R',strF,';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0],str(data['sigFDR']),str(data['sigFC']),data['Rlib']],capture_output=True)#
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in volcano.R: "+res.stderr.decode('utf-8'))
img = res.stdout.decode('utf-8')
# GSEA
GSEAimg=""
GSEAtable=pd.DataFrame()
if data['gsea']['enable']:
res = subprocess.run([strExePath+'/fgsea.R',
strF,
'%s/gsea/%s.symbols.gmt'%(strExePath,data['gsea']['gs']),
str(data['gsea']['gsMin']),
str(data['gsea']['gsMax']),
str(data['gsea']['padj']),
data['gsea']['up'],
data['gsea']['dn'],
str(data['gsea']['collapse']),
data['figOpt']['img'],
str(data['figOpt']['fontsize']),
str(data['figOpt']['dpi']),
data['Rlib']],capture_output=True)#
if 'Error' in res.stderr.decode('utf-8'):
raise SyntaxError("in fgsea.R: "+res.stderr.decode('utf-8'))
GSEAimg = res.stdout.decode('utf-8')
GSEAtable = | pd.read_csv(strF) | pandas.read_csv |
from numpy import *
import pandas as pd
import datetime
from datetime import timedelta
def sum_duplicated():
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ] # select the columns to read
x=pd.read_csv("data/train_2011_2012_2013.csv", sep=";", usecols=fields) # LECTURE
pd.DataFrame(x.groupby(('ASS_ASSIGNMENT', 'DATE', 'WEEK_END', 'DAY_WE_DS'), squeeze =False).sum()).to_csv("data/trainPure.csv", sep=';', encoding='utf_8')
def preprocessTOTAL(selectAss):
fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS' ] # select the columns to read
x=pd.read_csv("data/trainPure.csv", sep=";", usecols=fields) # LECTURE du fichier de train,
#################################################" Pour X
if(selectAss != False):#selection
x = x[x['ASS_ASSIGNMENT'] == selectAss]
del x['ASS_ASSIGNMENT']
x['YEAR'] = x['DATE'].str[0:4]
x['MONTH'] = x['DATE'].str[5:7]
x['DAY'] = x['DATE'].str[8:10]
x['HOUR'] = x['DATE'].str[-12:-8]
x['DATE'] = pd.to_datetime(x['DAY']+'/'+x['MONTH']+'/'+x['YEAR'])
############## get the call count from 7 days earlier into 's7'
tmp = pd.DataFrame()
tmp['HOUR'] = x['HOUR']
tmp['DATE'] = x['DATE']- timedelta(days=7)
#tmp.join(x[['DATE','HOUR', 'CSPL_RECEIVED_CALLS' ]], on=['DATE','HOUR'], how='left')
tmp[['DATE','HOUR', 's7' ]]=pd.merge(tmp[['DATE','HOUR']],x[['DATE','HOUR', 'CSPL_RECEIVED_CALLS' ]], on=['HOUR', 'DATE'], how='left')
x=pd.concat([x, tmp['s7']], axis=1)
x['s7'][ | pd.isnull(x['s7']) | pandas.isnull |
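# --- Editor's illustration of the one-week-lag merge used above, on a made-up two-row table.
def _lag_7_days_demo():
    toy = pd.DataFrame({
        "DATE": pd.to_datetime(["2011-01-01", "2011-01-08"]),
        "HOUR": ["08:00", "08:00"],
        "CSPL_RECEIVED_CALLS": [10, 25],
    })
    lookup = toy[["DATE", "HOUR"]].copy()
    lookup["DATE"] = lookup["DATE"] - timedelta(days=7)   # same slot, one week earlier
    toy["s7"] = lookup.merge(toy, on=["DATE", "HOUR"], how="left")["CSPL_RECEIVED_CALLS"].values
    return toy   # the 2011-01-08 row gets s7 = 10; the first row has no match and stays NaN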
'''
Append/Concatenate all of the separate trailers csvs into one csv
'''
import pandas as pd
import argparse
import os
def main():
'''
locates all csv files in the specified input folder and concatenates
them together
'''
parser = argparse.ArgumentParser(description='Concatenate trailer databases')
parser.add_argument('input', type=str, help="Folder containing \
trailers_2010s_{index}.csv files")
parser.add_argument('output', type=str, help="Location to save \
trailers_2010s.csv")
args = parser.parse_args()
trailers_csvs = [f for f in os.listdir(args.input) if f[:8] == "trailers"]
trailers_2010s = | pd.DataFrame() | pandas.DataFrame |
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
pa_version_under2p0,
pa_version_under4p0,
)
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
def test_startswith_endswith_non_str_patterns(pattern):
# GH3485
ser = Series(["foo", "bar"])
msg = f"expected a string object, not {type(pattern).__name__}"
with pytest.raises(TypeError, match=msg):
ser.str.startswith(pattern)
with pytest.raises(TypeError, match=msg):
ser.str.endswith(pattern)
def assert_series_or_index_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else: # Index
tm.assert_index_equal(left, right)
def test_iter():
# GH3638
strs = "google", "wikimedia", "wikipedia", "wikitravel"
ser = Series(strs)
with tm.assert_produces_warning(FutureWarning):
for s in ser.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ser.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, str) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == "l"
def test_iter_empty(any_string_dtype):
ser = Series([], dtype=any_string_dtype)
i, s = 100, 1
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(any_string_dtype):
ser = Series(["a"], dtype=any_string_dtype)
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert not i
tm.assert_series_equal(ser, s)
def test_iter_object_try_string():
ser = Series(
[
slice(None, np.random.randint(10), np.random.randint(10, 20))
for _ in range(4)
]
)
i, s = 100, "h"
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert i == 100
assert s == "h"
# test integer/float dtypes (inferred by constructor) and mixed
def test_count(any_string_dtype):
ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype)
result = ser.str.count("f[o]+")
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
expected = Series([1, 2, np.nan, 4], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_count_mixed_object():
ser = Series(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
result = ser.str.count("a")
expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_repeat(any_string_dtype):
ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype)
result = ser.str.repeat(3)
expected = Series(
["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
result = ser.str.repeat([1, 2, 3, 4, 5, 6])
expected = Series(
["a", "bb", np.nan, "cccc", np.nan, "dddddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
def test_repeat_mixed_object():
ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = ser.str.repeat(3)
expected = Series(
["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]])
def test_repeat_with_null(any_string_dtype, arg, repeat):
# GH: 31632
ser = Series(["a", arg], dtype=any_string_dtype)
result = ser.str.repeat([3, repeat])
expected = Series(["aaa", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_empty_str_methods(any_string_dtype):
empty_str = empty = Series(dtype=any_string_dtype)
if any_string_dtype == "object":
empty_int = Series(dtype="int64")
empty_bool = Series(dtype=bool)
else:
empty_int = Series(dtype="Int64")
empty_bool = Series(dtype="boolean")
empty_object = Series(dtype=object)
empty_bytes = Series(dtype=object)
empty_df = DataFrame()
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert "" == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.contains("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.startswith("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.endswith("a"))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.replace("a", "b"))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.match("^a"))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=any_string_dtype),
empty.str.extract("()", expand=True),
)
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=True),
)
tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False))
tm.assert_frame_equal(
| DataFrame(columns=[0, 1], dtype=any_string_dtype) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 16:44:24 2020
@author: Borja
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
- Ultra Trail Mont Blanc. Rankings from 2003 to 2017.
https://www.kaggle.com/ceruleansea/ultratrail-du-montblanc-20032017?select=utmb_2017.csv
- Ultra Trail Mont Blanc. Rankings from 2017 to 2019.
https://www.kaggle.com/purpleyupi/utmb-results
Data saved in 'Data/csv/*.csv'
"""
utmb_2003 = pd.read_csv('Data/csv/utmb_2003.csv', sep=',', decimal='.')
utmb_2004 = pd.read_csv('Data/csv/utmb_2004.csv', sep=',', decimal='.')
utmb_2005 = pd.read_csv('Data/csv/utmb_2005.csv', sep=',', decimal='.')
utmb_2006 = pd.read_csv('Data/csv/utmb_2006.csv', sep=',', decimal='.')
utmb_2007 = pd.read_csv('Data/csv/utmb_2007.csv', sep=',', decimal='.')
utmb_2008 = pd.read_csv('Data/csv/utmb_2008.csv', sep=',', decimal='.')
utmb_2009 = pd.read_csv('Data/csv/utmb_2009.csv', sep=',', decimal='.')
utmb_2010 = pd.read_csv('Data/csv/utmb_2010.csv', sep=',', decimal='.')
utmb_2011 = pd.read_csv('Data/csv/utmb_2011.csv', sep=',', decimal='.')
utmb_2012 = pd.read_csv('Data/csv/utmb_2012.csv', sep=',', decimal='.')
utmb_2013 = | pd.read_csv('Data/csv/utmb_2013.csv', sep=',', decimal='.') | pandas.read_csv |
import pandas as pd
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
json_data = "https://data.nasa.gov/resource/y77d-th95.json"
df_nasa = | pd.read_json(json_data) | pandas.read_json |
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import os
import pickle
import pandas as pd
from glob import glob
import numpy as np
# import dask.dataframe as dd
from tqdm import tqdm
import umap
from skimage.io import imsave
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from useful_wsi import get_image
def check_or_create(path):
"""
If path exists, does nothing otherwise it creates it.
Parameters
----------
path: string, path for the creation of the folder to check/create
"""
if not os.path.isdir(path):
os.makedirs(path)
def options_parser():
import argparse
parser = argparse.ArgumentParser(
description='Creating heatmap')
parser.add_argument('--path', required=False, default="*.csv",
metavar="str", type=str,
help='path')
parser.add_argument('--n_components', required=True,
metavar="int", type=int,
help='2 or 3, 2 being umap to 2 + cell counts and 3 being umap to 3')
parser.add_argument('--downsample_patient', required=False,
metavar="int", type=int,
help='downsampling rate for each patient individually')
parser.add_argument('--downsample_whole', required=False,
metavar="int", type=int,
help='downsampling rate for the table as a whole after regroup everyone and how')
parser.add_argument('--how', required=True, default="min",
metavar="str", type=str,
help='method to balance once you have everyone')
parser.add_argument('--balance', required=False, default=0,
metavar="int", type=int,
help='if to balance each patient')
parser.add_argument('--pca', required=False, default=0,
metavar="int", type=int,
help='if to use PCA or not')
parser.add_argument('--plotting', required=False, default=0,
metavar="int", type=int,
help='if to plot..')
args = parser.parse_args()
args.balancing = args.balance == 1
args.use_PCA = args.pca == 1
args.plotting = args.plotting == 1
if args.use_PCA:
print('Using PCA')
return args
def normalise_csv(f_csv, normalise=True, downsampling=1):
"""
Loading function for the cell csv tables.
Parameters
----------
    f_csv: string,
csv to load.
normalise: bool,
whether to normalise by the mean and standard deviation of the current csv table.
downsampling: int,
factor by which to downsample.
Returns
-------
A tuple of tables, the first being the features and the second the information relative to the line.
"""
xcel = pd.read_csv(f_csv, index_col=0)
feat = xcel.columns[:-6]
info = xcel.columns[-6:]
xcel_feat = xcel.loc[:, feat]
xcel_info = xcel.loc[:, info]
if normalise:
for el in feat:
xcel_feat_mean = xcel_feat.loc[:, el].mean()
xcel_feat_std = xcel_feat.loc[:, el].std()
xcel_feat[el] = (xcel_feat.loc[:, el] - xcel_feat_mean) / xcel_feat_std
if downsampling != 1:
sample = np.random.choice(xcel_feat.index, size=xcel_feat.shape[0] // downsampling, replace=False)
        xcel_feat = xcel_feat.loc[sample]
        xcel_info = xcel_info.loc[sample]
return xcel_feat, xcel_info
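# Illustrative usage sketch for normalise_csv: the file name
# 'data/example_cells.csv' is an assumption for this example and is not part
# of the pipeline; it only shows the expected call pattern.
def _example_normalise_csv():
    # z-score the feature columns and keep roughly every 10th cell
    feats, info = normalise_csv('data/example_cells.csv', normalise=True, downsampling=10)
    print(feats.shape, info.shape)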
def collect_files(prefix="data/*.csv", downsampling=1):
"""
Collect and opens files.
Parameters
----------
prefix: string,
folder where to collect the cell csv tables.
downsampling: int,
factor by which to downsample.
Returns
-------
    Dictionary where each file name is associated with its cell table.
"""
files = {}
for f in tqdm(glob(prefix)):
f_name = os.path.basename(f).split('.')[0]
files[f_name] = normalise_csv(f, normalise=True, downsampling=downsampling)
return files
def load_all(f, filter_out="LBP", balance=True, how="min", downsampling=1):
"""
Loading function for the cell csv tables.
Parameters
----------
    f: dictionary,
where each key represents a tissue and each item a feature table.
filter_out: str,
String pattern to filter out columns from the feature table, in 'glob' form.
If pattern in the feature name, exclude feature.
balance: bool,
Whether to balance the number of cell per patients with 'how' method.
how: str,
The method for balancing:
        - min, look at the smallest number of cells in one patient and use it as
        the number of samples to pick from each patient.
        - minthresh, same as before except the minimum is capped in case it
        would go too low.
Returns
-------
    A tuple of tables, the first being the concatenated feature table of all patients.
The second being the concatenated information relative to each cell of each patient.
"""
tables = []
tables_f2 = []
for k in f.keys():
f1, f2 = f[k]
f2['patient'] = k
feat = f1.columns
feat = [el for el in feat if filter_out not in el]
tables.append(f1[feat])
tables_f2.append(f2)
if balance:
if how == "min":
n_min = min([len(t) for t in tables])
for i in range(len(tables)):
sample = np.random.choice(tables[i].index, size=n_min, replace=False)
                tables[i] = tables[i].loc[sample]
                tables_f2[i] = tables_f2[i].loc[sample]
elif how == "minthresh":
min_thresh = 10000
n_min = min([len(t) for t in tables])
n_min = max(n_min, min_thresh)
for i in range(len(tables)):
size_table = tables[i].shape[0]
sample = np.random.choice(tables[i].index, size=min(n_min, size_table), replace=False)
                tables[i] = tables[i].loc[sample]
                tables_f2[i] = tables_f2[i].loc[sample]
print("Starting concatenation")
output = pd.concat(tables, axis=0, ignore_index=True)
output_info = pd.concat(tables_f2, axis=0, ignore_index=True)
if downsampling != 1:
sample = np.random.choice(output.index, size=output.shape[0] // downsampling, replace=False)
        output = output.loc[sample]
        output_info = output_info.loc[sample]
return output, output_info
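# Minimal sketch of the "min" balancing performed by load_all, run on a toy
# in-memory dictionary shaped like the output of collect_files. Patient names
# and feature values below are made up for illustration only.
def _example_load_all_balancing():
    toy = {
        'patient_a': (pd.DataFrame(np.random.randn(30, 3), columns=['f1', 'f2', 'f3']),
                      pd.DataFrame({'BBox_x_min': np.zeros(30)})),
        'patient_b': (pd.DataFrame(np.random.randn(10, 3), columns=['f1', 'f2', 'f3']),
                      pd.DataFrame({'BBox_x_min': np.zeros(10)})),
    }
    feat, info = load_all(toy, balance=True, how="min")
    # with how="min" each patient contributes 10 cells (the size of the smallest table)
    print(info.groupby('patient').size())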
def drop_na_axis(table):
"""
drop na from table.
Parameters
----------
table: pandas dataframe table,
we will drop all na from this table.
Returns
-------
The table without the na values.
"""
if table.isnull().values.any():
before_feat = table.shape[1]
table = table.dropna(axis=1)
print("We dropped {} features because of NaN".format(before_feat - table.shape[1]))
print("We have {} segmented cells and {} features fed to the umap".format(table.shape[0], table.shape[1]))
return table
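# Small illustration of drop_na_axis on a toy table: any column containing a
# NaN is dropped. The column names are assumptions used only for this example.
def _example_drop_na_axis():
    toy = pd.DataFrame({'ok': [1.0, 2.0, 3.0], 'broken': [1.0, np.nan, 3.0]})
    cleaned = drop_na_axis(toy)
    print(list(cleaned.columns))  # -> ['ok']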
def normalise_again_f(table):
"""
Normalising function for a table.
Parameters
----------
table: pandas dataframe table,
table to normalise by the mean and std.
Returns
-------
Normalised table.
"""
feat = table.columns
for el in feat:
table_mean = table.loc[:, el].mean()
table_std = table.loc[:, el].std()
table[el] = (table.loc[:, el] - table_mean) / table_std
return table, table_mean, table_std
def filter_columns(table, filter_in=None, filter_out=None):
"""
Table column filter function.
After this function, the remaining feature will satisfy:
being in filter_in and not being in filter_out.
Parameters
----------
table: pandas dataframe table,
table to normalise by the mean and std.
filter_in: string,
if the pattern is in the feature, keep the feature.
filter_out: string,
if the pattern is in the feature, exclude the feature.
Returns
-------
Table with filtered columns.
"""
feat = table.columns
if filter_in is not None:
        feat = [el for el in feat if filter_in in el]
if filter_out is not None:
feat = [el for el in feat if filter_out not in el]
return table[feat]
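# Sketch of filter_columns on a toy table: keep columns whose name contains
# 'Mean' and drop those containing 'LBP'. The column names are illustrative.
def _example_filter_columns():
    toy = pd.DataFrame(np.zeros((2, 3)), columns=['Mean_intensity', 'Mean_LBP', 'Area'])
    kept = filter_columns(toy, filter_in='Mean', filter_out='LBP')
    print(list(kept.columns))  # -> ['Mean_intensity']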
def umap_plot(umap_transform, name, table, n_comp=2):
"""
UMAP plot function, performs a density plot of the projected points.
The points belong to the table and can be projected to two or three dimensions with
the umap transform function.
Parameters
----------
umap_transform: trained umap model.
name: string,
name of the saved plot.
table: csv table,
lines of table to project and plot. They have to be the exact same as those used
for training the umap.
Returns
-------
A plot named 'name'.
"""
fig = plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k')
x = drop_na_axis(table)
    standard_embedding = umap_transform(x)
if n_comp == 2:
x = standard_embedding[:, 0]
y = standard_embedding[:, 1]
data = pd.DataFrame({'x':x, 'y':y})
sns.lmplot('x', 'y', data, fit_reg=False, scatter_kws={"s": 1.0})# , hue=)
else:
x = standard_embedding[:, 0]
y = standard_embedding[:, 1]
z = standard_embedding[:, 2]
data = pd.DataFrame({'x':x, 'y':y, 'z':z})
fig, axes = plt.subplots(ncols=3, figsize=(18, 16))
sns.regplot('x', 'y', data, fit_reg=False, scatter_kws={"s": 1.0}, ax=axes[0])
sns.regplot('x', 'z', data, fit_reg=False, scatter_kws={"s": 1.0}, ax=axes[1])
sns.regplot('y', 'z', data, fit_reg=False, scatter_kws={"s": 1.0}, ax=axes[2])
plt.savefig(name)
def pick_samples_from_cluster(data, info, y_pred, name, n_c=20, to_plot=10):
"""
Picks samples from different clusters in a UMAP.
Each point corresponds to an area in the wsi.
Parameters
----------
    data: string,
        path to the folder containing the whole-slide images (.tiff files).
info: csv table
same shape as data, each line corresponds to the information of the same line in data.
y_pred: int vector,
as long as the number of lines in data, each integer corresponds to a cluster.
name: string,
name of the resulting folder.
n_c: int,
number of clusters
to_plot: int,
how many samples to pick from each cluster.
Returns
-------
A folder with with to_plot samples from each n_c clusters.
"""
ind = 0
for c in tqdm(range(n_c)):
check_or_create(name + "/cluster_{}".format(c))
index_table_y_pred_class = info.loc[y_pred == c].index
sample = np.random.choice(index_table_y_pred_class, size=to_plot, replace=False)
for _ in range(sample.shape[0]):
            patient_slide = os.path.join(data, info.loc[sample[_], 'patient']+".tiff")
ind += 1
img_m = get_cell(sample[_], info, slide=patient_slide, marge=20)
crop_name = name + "/cluster_{}/{}_{}.png".format(c, os.path.basename(patient_slide).split('.')[0],ind)
imsave(crop_name, img_m)
def get_cell(id_, table, slide, marge=0):
"""
Returns the crop encapsulating the cell whose id is 'id_'
Parameters
----------
id_: int,
corresponds to an element in the index of table
table: csv table
        table with all identified cells.
slide: wsi,
slide corresponding to the table
marge: int,
        margin, in pixels, to add when cropping around the cell.
Returns
-------
    A crop around nucleus id_.
"""
info = table.loc[id_]
x_min = int(info["BBox_x_min"])
y_min = int(info["BBox_y_min"])
size_x = int(info["BBox_x_max"] - x_min)
size_y = int(info["BBox_y_max"] - y_min)
para = [x_min - marge, y_min - marge, size_x + 2*marge, size_y+2*marge, 0]
return get_image(slide, para)
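# Worked example of the crop geometry used in get_cell: for a nucleus with
# bounding box (100, 200) - (130, 240) and a 20 px margin, the parameter list
# handed to get_image is computed as below. The numbers are made up and no
# slide is read here.
def _example_get_cell_geometry(marge=20):
    row = pd.Series({'BBox_x_min': 100, 'BBox_y_min': 200,
                     'BBox_x_max': 130, 'BBox_y_max': 240})
    x_min, y_min = int(row['BBox_x_min']), int(row['BBox_y_min'])
    size_x = int(row['BBox_x_max'] - x_min)
    size_y = int(row['BBox_y_max'] - y_min)
    para = [x_min - marge, y_min - marge, size_x + 2 * marge, size_y + 2 * marge, 0]
    print(para)  # -> [80, 180, 70, 80, 0]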
def umap_plot_kmeans(umap_transform, name, table, info,
n_comp=2, n_c=10, samples_per_cluster=50,
path_to_slides="/mnt/data3/pnaylor/Data/Biopsy"):
"""
UMAP plot function, performs a scatter plot of the projected points.
    In addition to plotting them, this function will give a colour to each point
    corresponding to its cluster assignment.
Parameters
----------
umap_transform: trained umap model.
name: string,
name of the saved plot.
table: csv table,
lines of table to project and plot. They have to be the exact same as those used
for training the umap.
info: csv table,
corresponding information table to table.
n_comp: int,
        number of embedding components (dimensions) to project to.
n_c: int,
number of clusters
samples_per_cluster: int,
number of samples to pick from each cluster for visualisation purposes.
Returns
-------
    A plot named 'name' and a folder named 'name' with samples from each cluster.
"""
check_or_create(name)
fig = plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k')
x = drop_na_axis(table)
    standard_embedding = umap_transform(x)
if n_comp == 2:
x = standard_embedding[:, 0]
y = standard_embedding[:, 1]
y_pred = KMeans(n_clusters=n_c, random_state=42).fit_predict(np.array([x, y]).T)
data = pd.DataFrame({'x':x, 'y':y, 'cluster':y_pred})
sns.lmplot('x', 'y', data, hue="cluster", fit_reg=False, scatter_kws={"s": 1.0})
pick_samples_from_cluster(path_to_slides, info, y_pred, name, n_c=n_c, to_plot=samples_per_cluster)
else:
x = standard_embedding[:, 0]
y = standard_embedding[:, 1]
z = standard_embedding[:, 2]
y_pred = KMeans(n_clusters=n_c, random_state=42).fit_predict(np.array([x, y, z]).T)
fig, axes = plt.subplots(ncols=3, figsize=(18, 16))
axes[0].scatter(x, y, s=1., c=y_pred, cmap='tab{}'.format(n_c))
axes[1].scatter(x, z, s=1., c=y_pred, cmap='tab{}'.format(n_c))
img_bar = axes[2].scatter(y, z, s=1., c=y_pred, cmap='tab{}'.format(n_c))
bounds = np.linspace(0,n_c,n_c+1)
plt.colorbar(img_bar, ax=axes[2], spacing='proportional', ticks=bounds)
pick_samples_from_cluster(path_to_slides, info, y_pred, name, n_c=n_c, to_plot=samples_per_cluster)
plt.savefig(name + "/cluster_umap.pdf")
def umap_plot_patient(umap_transform, name, table, patient, n_comp=2):
"""
    UMAP plot function, performs a scatter plot or density plot for a patient's tissue.
Parameters
----------
umap_transform: trained umap model.
name: string,
name of the saved plot.
table: csv table,
lines of table to project and plot. They have to be the exact same as those used
for training the umap.
patient: string,
patient id as found in table
n_comp: int,
        number of embedding components (dimensions) to project to.
Returns
-------
    A plot named 'name' representing the scatter plot of the projected points of
a given patient.
"""
fig = plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k')
x = drop_na_axis(table)
    standard_embedding = umap_transform(x)
if n_comp == 2:
x = standard_embedding[:, 0]
y = standard_embedding[:, 1]
data = pd.DataFrame({'x':x, 'y':y, 'patient':patient})
sns.lmplot('x', 'y', data, hue="patient", fit_reg=False, scatter_kws={"s": 1.0})
else:
x = standard_embedding[:, 0]
y = standard_embedding[:, 1]
z = standard_embedding[:, 2]
data = | pd.DataFrame({'x':x, 'y':y, 'z':z, 'patient':patient}) | pandas.DataFrame |
#coding:utf-8
from typing import Set
from scipy.optimize.optimize import main
from basic_config import *
import seaborn as sns
import pandas as pd
def hist_attr(data, attr_names, logs, outpath, col=2, indexed=True):
indexes = 'abcdefghijklmn'
attr_num = len(attr_names)
if attr_num == 0:
logging.error('No attrname stated.')
return None
if attr_num != len(logs):
        logging.error('log scale list does not have the same length as attr_names.')
return None
if attr_num == 1:
indexed = False
    row = -(-attr_num // col)  # ceiling division so every attribute gets a subplot
    fig, axes = plt.subplots(row, col, figsize=(col * 4.5, row * 3.5), squeeze=False)
for i, attr_name in enumerate(attr_names):
r = i // col
c = i % col
ax = axes[r][c]
log = logs[i]
hist_one_attr(data, attr_name, ax, log=log)
xlabel = attr_name
if indexed:
xlabel += '\n(' + indexes[i] + ')'
ax.set_xlabel(xlabel)
plt.tight_layout()
plt.savefig(outpath, dpi=400)
logging.info(f'fig saved to {outpath}')
# Distribution of a single attribute
def hist_one_attr(data, attr_name, ax, log=True):
sns.histplot(data,
x=attr_name,
ax=ax,
log_scale=log,
kde=True,
stat='probability')
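# Illustrative example of hist_one_attr on synthetic data; the attribute name
# 'citation_count' and the log-normal values are assumptions for this sketch.
def _example_hist_one_attr():
    import numpy as np
    import matplotlib.pyplot as plt
    toy = pd.DataFrame({'citation_count': np.random.lognormal(mean=2.0, sigma=1.0, size=1000)})
    fig, ax = plt.subplots(figsize=(4.5, 3.5))
    hist_one_attr(toy, 'citation_count', ax, log=True)
    fig.savefig('example_hist_one_attr.png', dpi=200)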
def hist_indicators():
# read data
data = | pd.read_csv('data/author_topic_indicators.txt') | pandas.read_csv |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: | pd.Timestamp("2013-03-22 00:00:00") | pandas.Timestamp |
from tenzing.core.model_implementations.typesets import tenzing_standard, tenzing_geometry_set
from tenzing.core.typesets import infer_type, traverse_relation_graph
from tenzing.core.model_implementations import *
import pandas as pd
import numpy as np
from shapely import wkt
int_series = pd.Series(range(10))
int_string_series = pd.Series(range(20)).astype('str')
int_string_nan_series = pd.Series(['1.0', '2.0', np.nan])
bool_string_series = | pd.Series(["True", "False"]) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import unittest
import pandas as pd
import pandas.io.common
import biom
import skbio
import qiime2
from pandas.util.testing import assert_frame_equal, assert_series_equal
from q2_types.feature_table import BIOMV210Format
from q2_types.feature_data import (
TaxonomyFormat, HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
DNAFASTAFormat, DNAIterator, PairedDNAIterator,
PairedDNASequencesDirectoryFormat, AlignedDNAFASTAFormat,
DifferentialFormat, AlignedDNAIterator
)
from q2_types.feature_data._transformer import (
_taxonomy_formats_to_dataframe, _dataframe_to_tsv_taxonomy_format)
from qiime2.plugin.testing import TestPluginBase
# NOTE: these tests are fairly high-level and mainly test the transformer
# interfaces for the three taxonomy file formats. More in-depth testing for
# border cases, errors, etc. are in `TestTaxonomyFormatsToDataFrame` and
# `TestDataFrameToTSVTaxonomyFormat` below, which test the lower-level helper
# functions utilized by the transformers.
class TestTaxonomyFormatTransformers(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_taxonomy_format_to_dataframe_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_dataframe_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_series_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_series_equal(obs, exp)
def test_taxonomy_format_to_series_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_series_equal(obs, exp)
def test_headerless_tsv_taxonomy_format_to_tsv_taxonomy_format(self):
exp = (
'Feature ID\tTaxon\tUnnamed Column 1\tUnnamed Column 2\n'
'seq1\tk__Foo; p__Bar\tsome\tanother\n'
'seq2\tk__Foo; p__Baz\tcolumn\tcolumn!\n'
)
_, obs = self.transform_format(
HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
filename=os.path.join('taxonomy', 'headerless.tsv'))
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_tsv_taxonomy_format_to_dataframe(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TSVTaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_tsv_taxonomy_format_to_series(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TSVTaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_series_equal(obs, exp)
def test_dataframe_to_tsv_taxonomy_format(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Foo', 'Bar']
df = pd.DataFrame([['taxon1', '42', 'foo'], ['taxon2', '43', 'bar']],
index=index, columns=columns, dtype=object)
exp = (
'Feature ID\tTaxon\tFoo\tBar\n'
'seq1\ttaxon1\t42\tfoo\n'
'seq2\ttaxon2\t43\tbar\n'
)
transformer = self.get_transformer(pd.DataFrame, TSVTaxonomyFormat)
obs = transformer(df)
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_series_to_tsv_taxonomy_format(self):
index = pd.Index(['emrakul', 'peanut'], name='Feature ID',
dtype=object)
series = pd.Series(['taxon1', 'taxon2'],
index=index, name='Taxon', dtype=object)
exp = (
'Feature ID\tTaxon\n'
'emrakul\ttaxon1\n'
'peanut\ttaxon2\n'
)
transformer = self.get_transformer(pd.Series, TSVTaxonomyFormat)
obs = transformer(series)
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_biom_table_to_tsv_taxonomy_format(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
obs = transformer(table)
self.assertIsInstance(obs, TSVTaxonomyFormat)
self.assertEqual(
obs.path.read_text(),
'Feature ID\tTaxon\nO0\ta; b\nO1\ta; b\nO2\ta; b\nO3\ta; b\n')
def test_biom_table_to_tsv_taxonomy_format_no_taxonomy_md(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
observation_metadata = [dict(taxon=['a', 'b']) for _ in range(4)]
table = biom.Table(table.matrix_data,
observation_ids=table.ids(axis='observation'),
sample_ids=table.ids(axis='sample'),
observation_metadata=observation_metadata)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
with self.assertRaisesRegex(ValueError,
'O0 does not contain `taxonomy`'):
transformer(table)
def test_biom_table_to_tsv_taxonomy_format_missing_md(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
observation_metadata = [dict(taxonomy=['a', 'b']) for _ in range(4)]
observation_metadata[2]['taxonomy'] = None # Wipe out one entry
table = biom.Table(table.matrix_data,
observation_ids=table.ids(axis='observation'),
sample_ids=table.ids(axis='sample'),
observation_metadata=observation_metadata)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
with self.assertRaisesRegex(TypeError, 'problem preparing.*O2'):
transformer(table)
def test_biom_v210_format_to_tsv_taxonomy_format(self):
filename = os.path.join(
'taxonomy', 'feature-table-with-taxonomy-metadata_v210.biom')
_, obs = self.transform_format(BIOMV210Format, TSVTaxonomyFormat,
filename=filename)
self.assertIsInstance(obs, TSVTaxonomyFormat)
self.assertEqual(
obs.path.read_text(),
'Feature ID\tTaxon\nO0\ta; b\nO1\ta; b\nO2\ta; b\nO3\ta; b\n')
def test_biom_v210_format_no_md_to_tsv_taxonomy_format(self):
with self.assertRaisesRegex(TypeError, 'observation metadata'):
self.transform_format(
BIOMV210Format, TSVTaxonomyFormat,
filename=os.path.join('taxonomy', 'feature-table_v210.biom'))
def test_taxonomy_format_with_header_to_metadata(self):
_, obs = self.transform_format(TaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'3-column.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_taxonomy_format_without_header_to_metadata(self):
_, obs = self.transform_format(TaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'headerless.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp_df = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_format_to_metadata(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'3-column.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_to_metadata_trailing_whitespace_taxon(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join(
'taxonomy',
'trailing_space_taxon.tsv'))
index = pd.Index(['seq1'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_to_metadata_leading_whitespace_taxon(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join(
'taxonomy',
'leading_space_taxon.tsv'))
index = pd.Index(['seq1'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_to_metadata_trailing_leading_whitespace_taxon(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join(
'taxonomy',
'start_end_space_taxon.tsv'))
index = pd.Index(['seq1'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
# In-depth testing of the `_taxonomy_formats_to_dataframe` helper function,
# which does the heavy lifting for the transformers.
class TestTaxonomyFormatsToDataFrame(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_one_column(self):
with self.assertRaisesRegex(ValueError, "two columns, found 1"):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '1-column.tsv')))
def test_blanks(self):
with self.assertRaises(pandas.io.common.EmptyDataError):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'blanks')))
def test_empty(self):
with self.assertRaises(pandas.io.common.EmptyDataError):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', 'empty')))
def test_header_only(self):
with self.assertRaisesRegex(ValueError, 'one row of data'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'header-only.tsv')))
def test_has_header_with_headerless(self):
with self.assertRaisesRegex(ValueError, 'requires a header'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', 'headerless.tsv')),
has_header=True)
def test_jagged(self):
with self.assertRaises(pandas.io.common.ParserError):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', 'jagged.tsv')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, 'duplicated: SEQUENCE1'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join(
'taxonomy', 'duplicate-ids.tsv')))
def test_duplicate_columns(self):
with self.assertRaisesRegex(ValueError, 'duplicated: Column1'):
_taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join(
'taxonomy', 'duplicate-columns.tsv')))
def test_2_columns(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Bacteria; p__Proteobacteria'],
['k__Bacteria']], index=index, columns=['Taxon'],
dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '2-column.tsv')))
assert_frame_equal(obs, exp)
# has_header=True
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '2-column.tsv')),
has_header=True)
assert_frame_equal(obs, exp)
def test_3_columns(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '3-column.tsv')))
assert_frame_equal(obs, exp)
# has_header=True
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy', '3-column.tsv')),
has_header=True)
assert_frame_equal(obs, exp)
def test_valid_but_messy_file(self):
index = pd.Index(
['SEQUENCE1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Bar; p__Baz', 'foo'],
['some; taxonomy; for; ya', 'bar baz']],
index=index, columns=['Taxon', 'Extra Column'],
dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'valid-but-messy.tsv')))
assert_frame_equal(obs, exp)
# has_header=True
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'valid-but-messy.tsv')),
has_header=True)
assert_frame_equal(obs, exp)
def test_headerless(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
# has_header=None (default)
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'headerless.tsv')))
assert_frame_equal(obs, exp)
# has_header=False
obs = _taxonomy_formats_to_dataframe(
self.get_data_path(os.path.join('taxonomy',
'headerless.tsv')),
has_header=False)
assert_frame_equal(obs, exp)
# In-depth testing of the `_dataframe_to_tsv_taxonomy_format` helper function,
# which does the heavy lifting for the transformers.
class TestDataFrameToTSVTaxonomyFormat(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_no_rows(self):
index = pd.Index([], name='Feature ID', dtype=object)
columns = ['Taxon']
df = pd.DataFrame([], index=index, columns=columns, dtype=object)
with self.assertRaisesRegex(ValueError, 'one row of data'):
_dataframe_to_tsv_taxonomy_format(df)
def test_no_columns(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = []
df = pd.DataFrame([[], []], index=index, columns=columns, dtype=object)
with self.assertRaisesRegex(ValueError, 'one column of data'):
_dataframe_to_tsv_taxonomy_format(df)
def test_invalid_index_name(self):
index = | pd.Index(['seq1', 'seq2'], name='Foo', dtype=object) | pandas.Index |
import unittest
import numpy as np
from pandas.core.api import Series
import pandas.core.algorithms as algos
import pandas.util.testing as tm
class TestMatch(unittest.TestCase):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0])
self.assert_(np.array_equal(result, expected))
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1])
self.assert_(np.array_equal(result, expected))
class TestUnique(unittest.TestCase):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = | algos.unique(arr) | pandas.core.algorithms.unique |
import os
import numpy as np
import flopy
import pandas as pd
import sys
def vonNeumann_max_dt(transmiss,
                      s,
                      dx,
                      const=0.49):
    """Maximum stable time step from the von Neumann stability criterion for an
    explicit finite-difference scheme: dt_max = const * s * dx**2 / (2 * transmiss)."""
    return(s*dx**2/(2*transmiss/const))
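# Worked example with illustrative numbers: a transmissivity of 50 m2/day, a
# storage coefficient of 0.2 and a 50 m cell size give
# dt_max = 0.49 * 0.2 * 50**2 / (2 * 50) = 2.45 days.
def _example_vonNeumann_max_dt():
    print(vonNeumann_max_dt(transmiss=50.0, s=0.2, dx=50.0))  # -> 2.45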
### Taking into account higher leakage through boreholes
def get_realLeakage(area_welllocs = 0.3, #meter^2
area_model = 2500, #meter^2
kf_welllocs = 1E-7, #meter/day
kf_natural = 1E-6 #meter/day
):
return((area_welllocs * kf_welllocs + (area_model - area_welllocs) * kf_natural)/area_model);
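# Worked example with the default arguments: the area-weighted arithmetic mean
# (0.3 * 1e-7 + (2500 - 0.3) * 1e-6) / 2500 is roughly 9.999e-07 m/day, i.e.
# only marginally below the natural kf because the borehole area is tiny.
def _example_get_realLeakage():
    print(get_realLeakage())  # ~9.99892e-07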
def calc_model_wellcoordinates(Ly,
Lx,
csvDir = '.',
csvFile = 'wells_nodes.csv',
exp_dir = '.'):
wells_nodes = pd.read_csv(os.path.join(csvDir, csvFile))
if 'x' not in wells_nodes:
wells_nodes['x'] = 0
if 'y' not in wells_nodes:
wells_nodes['y'] = 0
wells_nodes.loc[:, ['x']] = wells_nodes['X_WERT'] - wells_nodes['X_WERT'].min()
wells_nodes.loc[:, ['x']] = wells_nodes['x'] + (Lx - wells_nodes['x'].max()) - 200
    ### equal distance from the upper/lower boundary
wells_nodes.loc[:, ['y']] = wells_nodes['Y_WERT'] - wells_nodes['Y_WERT'].min()
wells_nodes.loc[:, ['y']] = wells_nodes['y'].max() - wells_nodes['y'] + (Ly - wells_nodes['y'].max())/2
wells_nodes.to_csv(os.path.join(exp_dir, 'wells_nodes.csv'), index = False)
xul = float(wells_nodes.loc[0, ['X_WERT']].values-wells_nodes.loc[0, ['x']].values)
yul = float(wells_nodes.loc[0, ['Y_WERT']].values+wells_nodes.loc[0, ['y']].values)
coord_dict = {"xul" : xul, "yul": yul}
return(coord_dict)
def create_mnw2_csv_perPeriod(csvdir = '.',
basedir = 'SP'):
times = pd.read_csv(os.path.join(csvdir, 'wells_times.csv'))
nodes = pd.read_csv(os.path.join(csvdir, 'wells_nodes.csv'))
for stress_period in pd.unique(times.per):
sp_dir = basedir + str(stress_period)
        try:
            os.stat(sp_dir)
            print('Directory ' + sp_dir + ' already exists!')
        except OSError:
            print('Creating directory ' + sp_dir + '!')
            os.mkdir(sp_dir)
        times_per = times.loc[(times.qdes < 0) & (times.per == stress_period)].copy()
        times_per.loc[:, 'per'] = 0
times_per.to_csv(os.path.join(sp_dir, 'wells_times.csv'), index = False)
nodes_per = nodes[nodes['wellid'].isin( | pd.unique(times_per.wellid) | pandas.unique |
# -*- coding: utf-8 -*-
"""
we test .agg behavior / note that .apply is tested
generally in test_groupby.py
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (date_range, MultiIndex, DataFrame,
Series, Index, bdate_range, concat)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import SpecificationError, DataError
from pandas.compat import OrderedDict
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
class TestGroupByAggregate(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes(self):
# GH 12821
df = DataFrame(
{'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.first(), exp)
assert_frame_equal(grouped.agg('first'), exp)
assert_frame_equal(grouped.agg({'time': 'first'}), exp)
assert_series_equal(grouped.time.first(), exp['time'])
assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.last(), exp)
assert_frame_equal(grouped.agg('last'), exp)
assert_frame_equal(grouped.agg({'time': 'last'}), exp)
assert_series_equal(grouped.time.last(), exp['time'])
assert_series_equal(grouped.time.agg('last'), exp['time'])
# count
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.agg(len), exp)
assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes(self):
# similar to GH12821
# xref #11444
u = [datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
assert_series_equal(result, expected)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
pytest.raises(Exception, grouped.agg, lambda x: x.describe())
pytest.raises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
# TODO(wesm): unused
ser = self.df.C # noqa
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
assert self.ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index(
[], dtype=np.float64))
assert_series_equal(grouped.sum(), exp)
assert_series_equal(grouped.agg(np.sum), exp)
assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
index=pd.Index([], dtype=np.float64))
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
check_names=False)
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = | tm.makeTimeDataFrame() | pandas.util.testing.makeTimeDataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
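# Illustrative note (editor's addition): create_dataset builds a sliding-window supervised
# matrix from an (n, 1) array; with look_back=2 on [[1], [2], [3], [4], [5]] it yields
#   dataX = [[1, 2], [2, 3]]   # lagged inputs
#   dataY = [3, 4]             # next value for each window
# the final possible window is dropped because of the extra "-1" in the range above.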
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
            res[j] = predicted[j] / numpy.mean(actual)  # actual is 0 here: scale by the series mean to avoid division by zero
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import tensorflow as tf
numpy.random.seed(1234)
tf.random.set_seed(1234)
from sklearn.linear_model import LinearRegression
grid = LinearRegression()
grid.fit(X,y)
y_pred_train_lr= grid.predict(X)
y_pred_test_lr= grid.predict(X1)
y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
return mape,rmse,mae
# In[26]:
def svr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.svm import SVR
grid = SVR()
grid.fit(X,y)
y_pred_train_svr= grid.predict(X)
y_pred_test_svr= grid.predict(X1)
y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.neural_network import MLPRegressor
model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
numpy.random.seed(1234)
# make predictions
y_pred_train = model.predict(X)
y_pred_test = model.predict(X1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_test= sc_y.inverse_transform (y1)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train_rf= grid.predict(X)
y_pred_test_rf= grid.predict(X1)
y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_rf= sc_y.inverse_transform (y_pred_test_rf)
y_pred_train1_rf=sc_y.inverse_transform (y_pred_train_rf)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_rf)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_rf)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_rf)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_rf))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_rf)
return mape,rmse,mae
# In[29]:
def lstm_model(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX1 = numpy.reshape(X, (X.shape[0],1,X.shape[1]))
testX1 = numpy.reshape(X1, (X1.shape[0],1,X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
neuron=neuron
model = Sequential()
model.add(LSTM(units = neuron,input_shape=(trainX1.shape[1], trainX1.shape[2])))
model.add(Dense(1))
optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(loss='mse',optimizer=optimizer)
# model.summary()
# Fitting the RNN to the Training s
model.fit(trainX1, y, epochs = epoch, batch_size = batch_size,verbose=0)
# make predictions
y_pred_train = model.predict(trainX1)
y_pred_test = model.predict(testX1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y1=pd.DataFrame(y1)
y_test= sc_y.inverse_transform (y1)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[30]:
###################################################hybrid based ceemdan####################################################
def hybrid_ceemdan_rf(datass,look_back,data_partition,max_features):
import numpy as np
import pandas as pd
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
import pandas as pd
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
for col in data_imf:
datasetss2=pd.DataFrame(data_imf[col])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train= grid.predict(X)
y_pred_test= grid.predict(X1)
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_train=pd.DataFrame(y_pred_train)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_pred_train1= sc_y.inverse_transform (y_pred_train)
pred_test.append(y_pred_test1)
test_ori.append(y_test)
pred_train.append(y_pred_train1)
train_ori.append(y_train)
result_pred_test= pd.DataFrame.from_records(pred_test)
result_pred_train= pd.DataFrame.from_records(pred_train)
a=result_pred_test.sum(axis = 0, skipna = True)
b=result_pred_train.sum(axis = 0, skipna = True)
dataframe=pd.DataFrame(dfs)
dataset=dataframe.values
train_size = int(len(dataset) * data_partition)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
a= pd.DataFrame(a)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,a)
rmse= sqrt(mean_squared_error(y_test,a))
mae=metrics.mean_absolute_error(y_test,a)
return mape,rmse,mae
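# Recombination sanity check (editor's sketch; `s` below is a hypothetical toy signal, not
# data from this notebook): the hybrid models predict each IMF separately and then sum the
# per-IMF predictions, which is justified because the CEEMDAN components add back up to
# (approximately) the original series:
#   import numpy; from PyEMD import CEEMDAN
#   s = numpy.sin(numpy.linspace(0, 10, 200)) + 0.1 * numpy.random.randn(200)
#   IMFs = CEEMDAN(epsilon=0.05)(s)
#   numpy.abs(s - IMFs.sum(axis=0)).max()   # reconstruction gap (residue) should be small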
# In[31]:
def hybrid_ceemdan_lstm(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
from PyEMD import CEEMDAN
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
for col in data_imf:
datasetss2=pd.DataFrame(data_imf[col])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
neuron=neuron
model = Sequential()
model.add(LSTM(units = neuron,input_shape=(trainX.shape[1], trainX.shape[2])))
model.add(Dense(1))
optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(loss='mse',optimizer=optimizer)
numpy.random.seed(1234)
# Fitting the RNN to the Training set
model.fit(trainX, y, epochs = epoch, batch_size = batch_size,verbose=0)
# make predictions
y_pred_train = model.predict(trainX)
y_pred_test = model.predict(testX)
# make predictions
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_train= numpy.array(y_pred_train).ravel()
y_pred_train=pd.DataFrame(y_pred_train)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_pred_train1= sc_y.inverse_transform (y_pred_train)
pred_test.append(y_pred_test1)
test_ori.append(y_test)
pred_train.append(y_pred_train1)
train_ori.append(y_train)
result_pred_test= pd.DataFrame.from_records(pred_test)
result_pred_train= pd.DataFrame.from_records(pred_train)
a=result_pred_test.sum(axis = 0, skipna = True)
b=result_pred_train.sum(axis = 0, skipna = True)
dataframe=pd.DataFrame(dfs)
dataset=dataframe.values
train_size = int(len(dataset) * data_partition)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
a= pd.DataFrame(a)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,a)
rmse= sqrt(mean_squared_error(y_test,a))
mae=metrics.mean_absolute_error(y_test,a)
return mape,rmse,mae
# In[32]:
def proposed_method(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
from PyEMD import CEEMDAN
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
n_imf=len(data_imf.columns)
k=list(range(1,n_imf))
m=[0]
for i in m:
datasetss2=pd.DataFrame(data_imf[i])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train= grid.predict(X)
y_pred_test= grid.predict(X1)
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_train=pd.DataFrame(y_pred_train)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_pred_train1= sc_y.inverse_transform (y_pred_train)
pred_test.append(y_pred_test1)
test_ori.append(y_test)
pred_train.append(y_pred_train1)
train_ori.append(y_train)
for i in k:
datasetss2=pd.DataFrame(data_imf[i])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train= | pd.DataFrame(trainY) | pandas.DataFrame |
import sys
import os
import json
import pandas as pd
import features_topk as ft
from sklearn.inspection import permutation_importance
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import precision_score, recall_score
######################################################################
# READ INPUT #
######################################################################
def read_value(string):
if string[0] == "'" and string[-1] == "'":
return string[1:-1]
val = string
try:
val = int(string)
except:
try:
val = float(string)
except:
pass
return val
def load_options(fname):
d_options = {}
f = open(fname, "r")
lines = f.readlines()
#print(lines)
f.close()
for line in lines:
ignore = 0
if (len(line) > 0):
if line[0] == "#":
ignore = 1
if (ignore == 0 and "\t" in line):
line = line.rstrip() #[:-1]
li = line.split("\t")
d_options[li[0]] = read_value(li[1])
print(d_options)
return d_options
def get_list(d):
# File number indices;starting 0; excluding end index
if "MAL_START" in d.keys():
mstart = d["MAL_START"]
mend = d["MAL_END"]
else:
mstart = 0
mend = d["MAL_TOTAL"]
if "BEN_INSTSTART" in d.keys():
bstart = d["BEN_INSTSTART"]
bend = d["BEN_INSTEND"]
else:
bstart = mend
bend = mend+d["BEN_INSTNUM"]
if "MAL_INSTSTART" in d.keys():
mistart = d["MAL_INSTSTART"]
miend = d["MAL_INSTEND"]
else:
mistart = 0
miend = d["MAL_INSTNUM"]
if not "DATA_LOC" in d.keys():
raise Exception("Input folder path not found in options.")
sys.exit()
if "HOSTFTS" in d.keys():
if d["HOSTFTS"].lower() == "true":
hostfts = bool(True)
else:
hostfts = bool(False)
else:
hostfts = bool(False)
print(" ===== Hostfts Used?: ",hostfts)
if "FOLD_TOTAL" in d:
foldtotal = d["FOLD_TOTAL"]
else:
foldtotal = 10
# 0: Binary classification; 1: Multiclass; 2: Multi class, multi target label
if "MULTICLASS" in d:
multi = d["MULTICLASS"] # 0: binary classification, 1: multiclass, 2: multilabel, multiclass
else:
multi = 0 # binary classification by default
totalmal = (mend - mstart) * (miend+1) # Each mal instance X => X.cell & X-Y.cell; 0 <= Y < miend
totalben = bend - bstart # Each ben instance Y => Y.cell; bstart <= Y < bend
malfnames = []
benfnames = []
for (dp, dnames, fnames) in os.walk(d["DATA_LOC"]):
for fname in fnames:
if "-" in fname:
if fname not in malfnames:
                    malfnames += [os.path.join(dp, fname)]
print(fname+"----Malicious")
else:
fnum = int(fname.split(".")[0])
if fname not in malfnames and fnum >= mstart and fnum < mend:
#print(fname)
                    malfnames += [os.path.join(dp, fname)]
print(fname+"----Malicious")
else:
# Take benign data only for binary classification
if fname not in benfnames and fnum >= bstart and fnum < bend and multi == 0:
                        benfnames += [os.path.join(dp, fname)]
print(fname+"----Benign")
print("Malstart:%d\nMalend: %d\nBenstart:%d\nBenend:%d\n"%(mstart,mend-1,bstart, bend-1))
readm = len(malfnames)
readb = len(benfnames)
if not readm == totalmal:
print("Malware files read (%d) and options spec (%d) mismatch"%(readm, totalmal))
#malfnames.sort()
#print(malfnames)
if not readb == totalben:
print("Benign files read (%d) and options spec (%d) mismatch"%(readb, totalben))
#benfnames.sort()
#print(benfnames)
print("Read:\n Malware files: %d\n Benign files:%d\n CV Folds:%d\n"%(readm, readb, foldtotal))
return [malfnames, benfnames, foldtotal, multi, mend, miend, hostfts]
# Read AVCLASS results; map md5 hash -> sha256
def check_dependencies(d):
if "MULTICLASS" in d:
if d["MULTICLASS"] == 2:
if "AVCLASS_FILE_LOC" in d:
location = d["AVCLASS_FILE_LOC"]
else:
                location = os.path.join(os.getcwd(), "avclass", "AVCLASS.classes")
if not os.path.exists(location):
print("AVCLASS label file required for multilabel classification! : ",location)
return False
if "VT_REPORTS_LOC" in d:
reportloc = d["VT_REPORTS_LOC"]
else:
                reportloc = os.path.join(os.getcwd(), "avclass", "reports.avclass")
if not os.path.exists(reportloc):
print("VT detail reports required for AVCLASS labels of malware samples!: ", reportloc)
return False
else:
print("Set 'MULTICLASS = 2' in options file to run multiclass, multi label classifier\n (Mode=0 : Binary classification, Mode=1 : Multi class classification)")
return False
else:
print("Can't reach here")
return False
return True
def getfiles(d):
if "AVCLASS_FILE_LOC" in d:
location = d["AVCLASS_FILE_LOC"]
else:
        location = os.path.join(os.getcwd(), "avclass", "AVCLASS.classes")
if "VT_REPORTS_LOC" in d:
reportloc = d["VT_REPORTS_LOC"]
else:
        reportloc = os.path.join(os.getcwd(), "avclass", "reports.avclass")
return [location, reportloc]
def get_sha_md5map(reports="avclass/reports.avclass"):
# Map SHA256 - md5
md5_sha = dict()
sha_md5 = dict()
skipped = 0
with open(reports, "r") as f:
for line in f.readlines():
datadt = json.loads(line)
#print(datadt)
if "data" in datadt:
if "attributes" in datadt["data"]:
attdt = datadt["data"]["attributes"]
keys = attdt.keys()
if not ("md5" in keys and "sha256" in keys):
skipped += 1
#print(line)
print("No MD5/SHA hash in report , skipping....\n")
continue
else:
md5 = attdt["md5"]
sha = attdt["sha256"]
if md5 not in md5_sha:
md5_sha[md5] = sha
if sha not in sha_md5:
sha_md5[sha] = md5
print("Not found: ", skipped)
print("# of md5-labels read: ", len(sha_md5))
return [md5_sha, sha_md5]
# return: sha256 malware hash: [class labels]
def read_multilabels(avclassfile, reports):
sha_md5 = get_sha_md5map(reports)[0] # needs md5 -> sha map
classdt = dict()
with open(avclassfile, "r") as ff:
for line in ff.readlines():
if "," in line:
line = line.replace(","," ")
line = line.rstrip().split(" ")
md5 = line[0]
if line[1].isnumeric():
labellst = line[2:]
else:
labellst = line[1:]
#print(labellst)
labels = labellst[0::2]
print("Labels: ", labels)
sha = None
if md5 in sha_md5:
sha = sha_md5[md5]
if sha not in classdt:
classdt[sha] = labels
#else:
# print("Duplicate entries for sample!Skipping", sha, labels, classdt[sha])
else:
print("VT report not found!! Skipping sample: ", md5, labels)
#print(md5, labels)
print("# Samples with AVCLASS labels: ", len(classdt))
return classdt
# Read map index file (malware hash->id)
# return: {mapi: [malware class labels, shahash]}
# NOTE: Needs "MAPFILE" location in options file
def sha_mapi(d, classdt, multiclass=False):
sha_label_map = dict()
uniq_labels = []
if "MAPFILE" in d:
fname = d["MAPFILE"]
with open(fname, "r") as f:
for line in f.readlines():
line = line.rstrip().split("\t")
shahash = line[0]
mapi = int(line[1])
#print(shahash, mapi)
if not multiclass: # It is Multi label case
labels = ["unknown"]
if shahash in classdt.keys():
labels = classdt[shahash] # Get multilabels
else:
for sha, l in classdt.items():
if shahash in sha or shahash == sha:
labels = l
shahash = sha
break
if shahash not in sha_label_map:
sha_label_map[mapi] = [labels, shahash]
for lab in labels:
if lab == "unknown":
print(shahash)
uniq_labels += [lab]
else:
# For multiclass, just return mapi-sha mapping
if shahash not in sha_label_map:
sha_label_map[shahash] = mapi
#print(sha_label_map)
print("Total Malware-AVCLASS mapping: ",len(sha_label_map))
print("Labels in dataset: ", set(uniq_labels))
assert len(sha_label_map) == d["MAL_TOTAL"]
else:
print("Aborting! Map file not available! Run cell extraction script.")
return None
return sha_label_map
# Return: {sha: malware family}
def get_family(sha_md5, malfamily="avclass/Malware.family"):
sha_malfam = dict()
md5_fam = dict()
with open(malfamily, "r") as f:
for line in f.readlines():
line = line.rstrip().split("\t")
md5 = line[0]
fam = line[1]
if "SINGLETON" in fam: #Just take known families for now
continue
print(md5, fam)
if md5 not in md5_fam:
md5_fam[md5] = fam
print("Unique md5-family mapping: ", len(md5_fam))
return md5_fam
# Get AVCLASS Malware families for multiclass labelling:
# location: ../avclass/Malware.family
def get_mal_families(d):
mal_i_fam = dict()
# 1. Get all hashes sha-md5 mapping
sha_md5 = get_sha_md5map()[1] # needs sha-> md5 map
# 2. Get sha-malware index mapping
sha_mal_i = sha_mapi(d, sha_md5, True)
# 3. Get family name from md5
md5_fam = get_family(sha_md5)
# 4. Get mal_i and family
for sha, mal_i in sha_mal_i.items():
#print(sha, mal_i)
if sha in sha_md5:
md5 = sha_md5[sha]
if md5 in md5_fam:
fam = md5_fam[md5]
else:
fam = "mal_"+str(sha_mal_i[sha])
print("SINGLETON replaced with malware index: ", fam)
if mal_i not in mal_i_fam:
mal_i_fam[str(mal_i)] = fam
else:
print("Match for hash NotFound! : ", sha)
print("Sha-md5 and Sha-malwareindex: ", len(sha_md5), len(sha_mal_i))
print("Malware to family name mapping: ", len(mal_i_fam))
return mal_i_fam
# Count malware binary-family distribution
def malware_distribution(labeldt):
count_fam = dict()
# k : file name, fam: [fam, malware_index]
for k, fam in labeldt.items():
if not "-" in k:
if fam[0] not in count_fam:
count_fam[fam[0]] = 1
else:
count_fam[fam[0]] += 1
lst = []
# Family- unique malware in that family
for fam, count in count_fam.items():
lst += [(count, fam)]
lst.sort()
print("Family-malware distriubtion: ", lst)
return
######################################################################
# LABELLING #
######################################################################
# MULTICLASS: 0
def label_binary(malfnames, benfnames):
print("Binary labelling.....")
labeldt = dict()
for mf in malfnames:
if mf not in labeldt:
labeldt[mf] = 1
print(mf,"1")
for bf in benfnames:
if bf not in labeldt:
labeldt[bf] = 0
print(bf,"0")
#print(labeldt)
print("Labelled dataset: ", len(labeldt))
# Assign labels based on file names: return: filename -> label
return labeldt
# MULTICLASS: 1
def label_multiclass(d, malfnames, benfnames, mal_family):
print("Multiclass labelling.......Malware family?:", mal_family)
labeldt = dict()
mallabels = []
if mal_family:
mal_i_fam = get_mal_families(d)
for mf in malfnames:
X = None
fname = mf
mf = mf.split("/")[-1].split(".")[0]
if "-" in mf:
X = str(mf.split("-")[0]) #X-Y.cell -> X is the malware label
else:
X = str(mf) #X.cell -> X
if fname not in labeldt:
if mal_family:
if X in mal_i_fam:
fam = mal_i_fam[X]
print("Family Multiclass Labelling: ", fname, fam)
labeldt[fname] = [fam, X] # family name, malware binary index
else:
print("Don't expect to come here. All malware indices must have mapping")
labeldt[fname] = ["mal_"+str(X), X]
else:
print("Binary Multiclass Labelling: ", fname, X)
labeldt[fname] = X
if X not in mallabels:
mallabels += [X]
'''
benlabel = int(max(mallabels)) + 1
print("Benign class label(max class): ", benlabel)
for bf in benfnames:
if bf not in labeldt:
labeldt[bf] = str(benlabel)
print("Labelling: ", bf, benlabel)
'''
print("Labelled dataset: ", len(labeldt))
print("Unique malware noted: ", len(mallabels))
if mal_family:
malware_distribution(labeldt)
return labeldt
def transform_labels(avlabels):
alllabels = []
for mi, val in avlabels.items():
labels = val[0]
for lab in labels:
if lab not in alllabels:
alllabels.append(lab)
#print(alllabels)
uniqlabels = list(set(alllabels))
print(uniqlabels)
mlb = MultiLabelBinarizer()
mlb.fit([uniqlabels])
print("MULTI LABEL CLASSES: ",mlb.classes_)
#print(result)
return [mlb, mlb.classes_]
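# Illustrative note (editor's addition, toy labels only): MultiLabelBinarizer turns a list
# of label sets into a fixed-order indicator matrix, which is what generate_multilabel()
# below relies on, e.g.
#   >>> from sklearn.preprocessing import MultiLabelBinarizer
#   >>> mlb = MultiLabelBinarizer().fit([['ransom', 'trojan', 'worm']])
#   >>> mlb.transform([['ransom', 'worm']])
#   array([[1, 0, 1]])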
def generate_multilabel(elements, mlb, classorder):
multilabel = mlb.transform([elements])
print(elements, multilabel)
print(type(multilabel))
return multilabel
# MULTICLASS: 2
def label_multiclass_multilabel(malfnames, benfnames, avlabels):
# avlabels-> {'malware mapindex/X in X-Y.cell': [avclass labels, sha]}
print("Multiclass Multilabelling.......")
labeldt = dict()
mallabels = []
# Get multiclass labels from avclass labels
[mlb, classorder] = transform_labels(avlabels)
for mf in malfnames:
X = None
fname = mf
mf = mf.split("/")[-1].split(".")[0]
if "-" in mf:
X = int(mf.split("-")[0]) #X-Y.cell -> X is the malware label
else:
X = int(mf) #X.cell -> X
print("Labelling: ", fname, X)
if fname not in labeldt:
if X in avlabels:
labeldt[fname] = generate_multilabel(avlabels[X][0], mlb, classorder)
else:
print("Label for malware index: %d not available! Check mapping!"%X)
if X not in mallabels:
mallabels += [X]
'''
benlabel = int(max(mallabels)) + 1
print("Benign class label(max class): ", benlabel)
for bf in benfnames:
if bf not in labeldt:
labeldt[bf] = benlabel
print("Labelling: ", bf, benlabel)
'''
#generate_multilabel(avlabels[X][0], mlb, classorder)
print("Labelled dataset: ", len(labeldt))
return [labeldt, classorder, mlb]
######################################################################
# FEATURE EXTRACTION #
######################################################################
def get_topk_conn_cells(data, topk=3):
topkconns = []
torconn1 = []
torconn2 = []
torconn3 = []
for line in data:
if "1#" in line:
torconn1 += [line.split("#")[1]]
elif "2#" in line and topk >= 2:
torconn2 += [line.split("#")[1]]
elif "3#" in line and topk == 3:
torconn3 += [line.split("#")[1]]
elif "HOSTFTS" in line:
continue
#print("T1: ", torconn1)
t1 = len(torconn1)
t2 = len(torconn2)
t3 = len(torconn3)
print("Top 3 Tor connection cells noted: ", t1, t2, t3)
if t1 > 0 and t2 > 0 and t3 > 0:
topkconns += [torconn1, torconn2, torconn3]
elif t2 == 0:
topkconns = [torconn1]
elif t2 > 0 and t3 == 0:
topkconns = [torconn1, torconn2]
#print("Topkconns: ", topkconns)
return topkconns
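# Usage sketch (editor's illustration; the input lines are hypothetical, real values come
# from the .cell capture files): each line is expected to carry a "<conn-rank>#<cell>"
# prefix, so the function buckets cells by the top-k Tor connection they belong to, e.g.
#   get_topk_conn_cells(["1#+1", "1#-1", "2#+1", "HOSTFTS ...", "2#-1"], topk=2)
# returns [['+1', '-1'], ['+1', '-1']] and drops any empty trailing connections.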
def extract_features(labeldt, multiclass, hostfts, top=3, classorder=[], malfamily=False, trainmulti=True):
feats = []
c = 0
totalcols = []
topkcount = []
print("Extracting features for Classification Mode: ", multiclass)
print("Host fts switch active?: ", hostfts)
all_multilabels = []
famcount = dict()
for fpath, label in labeldt.items():
print("*",fpath, label)
data = open(fpath).readlines()
print(top)
topkconns = get_topk_conn_cells(data, top)
print("Taking Top: ", len(topkconns))
topkcount += [len(topkconns)]
if len(topkconns) == 0:
continue
for conndata in topkconns: # Save Fts:label in DF for each conn in topk
tcp_dump = conndata #[:101] # just take 1st 100 cells
#if hostfts and "##HOST_FTS" not in tcp_dump:
# tcp_dump += [str(data[-1])]
# print("Host fts added: ", data[-1])
print("TCPDUMP CELLS: ", len(tcp_dump))
fts = []
print(fpath)
fts = ft.TOTAL_FEATURES(tcp_dump, False) #Keep bool vals for topk
if hostfts:
# Extract only host fts here
Hfts = ft.TOTAL_FEATURES(data, False, True)
print("Host fts: ", Hfts)
assert len(Hfts) > 0
fts += Hfts
assert len(fts) == 215
print("Full fts: ", fts)
totalcols += [len(fts)]
print("Extracting features: ", fpath, label)
# Multi class classification
if multiclass == 1:
print("Multi class: label :", label)
fam = label[0]
if malfamily: # balance family-instances
mali = label[1]
print("Malware families used for multi class labels")
if fam not in famcount:
famcount[fam] = [mali]
else:
if len(famcount[fam]) < 6 and mali not in famcount[fam]:
famcount[fam] += [mali]
fam_malcount = len(famcount[fam])
if fam == "agentb" or fam == "shade" or fam == "nymeria":
# Take only 6 unique binaries / max no. class binaries and instances
if fam_malcount <= 6 and mali in famcount[fam]:
feats += [fts+[fam]]
else:
feats += [fts+[fam]]
else:
# Normal multiclass case
feats += [fts+[fam]]
print("Normal multiclass", fts)
# Multi label classification
elif multiclass == 2:
if trainmulti:
all_multilabels += list(label)
else:
all_multilabels += [label]
feats += [fts]
else:
# binary classification
feats += [fts+[label]]
print("Total features: ", max(totalcols))
#print(feats)
print("Total files for which features extracted: ", len(feats))
#print(all_multilabels)
featdf = pd.DataFrame(feats)
##print("Topk connection distribution per file: ", topkcount.sort())
if multiclass == 2:
#featdf['target'] = [l[0] for l in featdf['target']]
if list(classorder) == [] and trainmulti:
print("Classorder needed to set labels in DF!")
return None
else:
if not trainmulti: # Return just features for testing
labeldf = | pd.DataFrame(all_multilabels) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
            trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
                                              [2.34, 1.384, 3.4]], dtype='object')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
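    # Worked check (editor's note, not part of the original suite): the first expected
    # value above follows directly from the docstring formula,
    #   adjusted_toxicity = ld50_bird * (aw_bird / tw_bird_ld50) ** (mineau_sca_fact - 1)
    #   100. * (15. / 175.) ** (1.15 - 1.) ~= 69.1764
    # which matches expected_results[0].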
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
            trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
                                              [2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = | pd.Series([100., 125., 90.], dtype='float') | pandas.Series |
import collections
import logging
import numpy as np
import pandas as pd
import core.event_study as esf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestBuildLocalTimeseries(hut.TestCase):
def test_minutely1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "T"
start_date = pd.Timestamp("2009-09-29 10:00:00")
relative_grid_indices = list(range(-10, 10)) + [14]
idx = pd.date_range(start_date, periods=n_periods, freq=freq)
events = pd.DataFrame(data={"ind": 1}, index=idx)
grid_idx = pd.date_range(
start_date - pd.Timedelta(f"50{freq}"),
freq=freq,
periods=n_periods + 100,
)
grid_data = pd.DataFrame(np.random.randn(len(grid_idx)), index=grid_idx)
info: collections.OrderedDict = collections.OrderedDict()
local_ts = esf.build_local_timeseries(
events, grid_data, relative_grid_indices, info=info
)
str_info = str(info).replace("None", f"'{freq}'")
self.check_string(f"local_ts:\n{local_ts.to_string()}\ninfo:\n{str_info}")
def test_daily1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "D"
start_date = pd.Timestamp("2009-09-29 10:00:00")
relative_grid_indices = list(range(-10, 10)) + [14]
idx = pd.date_range(start_date, periods=n_periods, freq=freq)
events = pd.DataFrame(data={"ind": 1}, index=idx)
grid_idx = pd.date_range(
start_date - pd.Timedelta(f"50{freq}"),
freq=freq,
periods=n_periods + 100,
)
grid_data = pd.DataFrame(np.random.randn(len(grid_idx)), index=grid_idx)
info: collections.OrderedDict = collections.OrderedDict()
local_ts = esf.build_local_timeseries(
events, grid_data, relative_grid_indices, info=info
)
str_info = str(info).replace("None", f"'{freq}'")
self.check_string(f"local_ts:\n{local_ts.to_string()}\ninfo:\n{str_info}")
def test_daily_shift_freq1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "D"
shift_freq = "2D"
start_date = pd.Timestamp("2009-09-29 10:00:00")
relative_grid_indices = list(range(-10, 10)) + [14]
idx = pd.date_range(start_date, periods=n_periods, freq=freq)
events = pd.DataFrame(data={"ind": 1}, index=idx)
grid_idx = pd.date_range(
start_date - pd.Timedelta(f"50{freq}"),
freq=freq,
periods=n_periods + 100,
)
grid_data = pd.DataFrame(np.random.randn(len(grid_idx)), index=grid_idx)
info: collections.OrderedDict = collections.OrderedDict()
local_ts = esf.build_local_timeseries(
events, grid_data, relative_grid_indices, freq=shift_freq, info=info
)
str_info = str(info).replace("None", f"'{freq}'")
self.check_string(f"local_ts:\n{local_ts.to_string()}\ninfo:\n{str_info}")
def test_multiple_responses_daily1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "D"
start_date = pd.Timestamp("2009-09-29 10:00:00")
n_cols = 2
relative_grid_indices = list(range(-10, 10)) + [14]
idx = pd.date_range(start_date, periods=n_periods, freq=freq)
events = pd.DataFrame(data={"ind": 1}, index=idx)
grid_idx = pd.date_range(
start_date - pd.Timedelta(f"50{freq}"),
freq=freq,
periods=n_periods + 100,
)
grid_data = pd.DataFrame(
np.random.randn(len(grid_idx), n_cols), index=grid_idx
)
info: collections.OrderedDict = collections.OrderedDict()
local_ts = esf.build_local_timeseries(
events, grid_data, relative_grid_indices, info=info
)
str_info = str(info).replace("None", f"'{freq}'")
self.check_string(f"local_ts:\n{local_ts.to_string()}\ninfo:\n{str_info}")
class TestUnwrapLocalTimeseries(hut.TestCase):
def test_daily1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "D"
start_date = pd.Timestamp("2009-09-29 10:00:00")
relative_grid_indices = list(range(-10, 10)) + [14]
timestamps = pd.date_range(start_date, periods=n_periods, freq=freq)
idx = pd.MultiIndex.from_product([relative_grid_indices, timestamps])
local_ts = pd.DataFrame(np.random.randn(len(idx)), index=idx)
grid_data = pd.DataFrame(np.random.randn(n_periods), index=timestamps)
unwrapped = esf.unwrap_local_timeseries(local_ts, grid_data)
self.check_string(unwrapped.to_string())
def test_minutely1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "T"
start_date = pd.Timestamp("2009-09-29 10:00:00")
relative_grid_indices = list(range(-10, 10)) + [14]
timestamps = pd.date_range(start_date, periods=n_periods, freq=freq)
idx = | pd.MultiIndex.from_product([relative_grid_indices, timestamps]) | pandas.MultiIndex.from_product |
import sys
from osgeo import gdal
import numpy as np
import cv2
import scipy.stats
from scipy.sparse import csgraph, csr_matrix
import pandas as pd
from sklearn.cluster import KMeans
from skimage import segmentation as skseg
from skimage.measure import regionprops
from sklearn import preprocessing
BG_VAL = -1
MASK_VAL = 9999
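# `profile` is normally injected as a builtin by line profilers such as
# kernprof/line_profiler; when it is not defined, fall back to a no-op
# decorator so the @profile annotations below still work.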
try:
type(profile)
except NameError:
def profile(fn): return fn
@profile
def neighbor_matrix(labels, bg=True, connectivity=4, touch=True):
"""
Generate a connectivity matrix of all labels in the label map.
Parameters
----------
labels : np.array, shape (M,N)
The label map.
bg : bool, optional
Whether to include the background.
connectivity : int, optional
One of [4,8]. If 8, labels also connect via corners.
touch : bool, optional
(legacy option) If False, labels are neighbors even if there is a gap
of 1 pixel between them. (default: True)
Returns
-------
pd.DataFrame, shape (L,L)
A DataFrame where index and columns are the unique labels and position
[i,j] is True iff labels i and j are neighbors.
"""
x = np.unique(labels)
if not bg:
x = x[x != BG_VAL]
kernels = [
np.array([[1]]),
np.array([[1, 0, 0]]),
np.array([[0, 0, 1]]),
np.array([[1, 0, 0]]).T,
np.array([[0, 0, 1]]).T
]
if connectivity == 8:
kernels.extend([
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]),
np.array([[0, 0, 1], [0, 0, 0], [0, 0, 0]]),
np.array([[0, 0, 0], [0, 0, 0], [1, 0, 0]]),
np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]),
])
shifted = np.stack([cv2.filter2D(labels.astype(np.float32), -1, k)
for k in kernels], axis=-1)
if touch:
bg_neighbors = shifted[labels != BG_VAL]
else:
bg_neighbors = shifted[labels == BG_VAL]
bg_neighbors[bg_neighbors == BG_VAL] = np.nan
bg_neighbors = bg_neighbors[~np.isnan(bg_neighbors).all(axis=1)]
_mins = np.nanmin(bg_neighbors, axis=1).astype(np.int32)
_maxs = np.nanmax(bg_neighbors, axis=1).astype(np.int32)
npairs = np.stack([_mins, _maxs], axis=-1)[_mins != _maxs]
idx = np.arange(len(x))
lookup = dict(np.stack([x, idx], axis=-1))
npairs_idx = np.vectorize(lambda x: lookup[x], otypes=[np.int32])(npairs)
result = np.zeros((len(x),) * 2, dtype=bool)
result[npairs_idx[:, 0], npairs_idx[:, 1]] = True
result[npairs_idx[:, 1], npairs_idx[:, 0]] = True
result[x == BG_VAL, :] = False
result[:, x == BG_VAL] = False
# DEBUG: Somehow this line is very expensive:
# result = np.logical_or(result, result.T)
return pd.DataFrame(result, index=x, columns=x)
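# Illustrative usage sketch: a hypothetical call to `neighbor_matrix` on a tiny
# hand-made label map, assuming the module-level imports (gdal, cv2, pandas,
# numpy, ...) are available. The map values and the expected adjacencies are
# for illustration only.
def _neighbor_matrix_example():
    tiny = np.array([[1, 1, 2],
                     [1, 3, 2]], dtype=np.int32)
    nm = neighbor_matrix(tiny)
    # With the default 4-connectivity, label 3 shares an edge with 1 and 2, and
    # labels 1 and 2 touch in the top row, so those entries should be True.
    print(nm)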
@profile
def edge_length(labels, bg=True, connectivity=4):
"""
Compute the length of an edge between any two labels.
Parameters
----------
labels : np.array, shape (M,N)
The label map.
bg : bool, optional
Whether to include the background.
connectivity : int, optional
One of [4,8]. If 8, labels also connect via corners.
Returns
-------
pd.DataFrame, shape (L,L)
A DataFrame where index and columns are the unique labels and position
[i,j] is the length of the edge between labels i and j.
"""
x = np.unique(labels)
if not bg:
x = x[x != BG_VAL]
kernels = [
np.array([[1]]),
np.array([[1, 0, 0]]),
np.array([[0, 0, 1]]),
np.array([[1, 0, 0]]).T,
np.array([[0, 0, 1]]).T
]
if connectivity == 8:
kernels.extend([
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]),
np.array([[0, 0, 1], [0, 0, 0], [0, 0, 0]]),
np.array([[0, 0, 0], [0, 0, 0], [1, 0, 0]]),
np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]),
])
shifted = np.stack([cv2.filter2D(labels.astype(np.float32), -1, k)
for k in kernels], axis=-1)
if not bg:
shifted[shifted == BG_VAL] = np.nan
_mins = np.nanmin(shifted, axis=2).astype(np.int32)
_maxs = np.nanmax(shifted, axis=2).astype(np.int32)
edge = (_mins != _maxs)
l1 = _mins[edge]
l2 = _maxs[edge]
pairs = np.stack([l1, l2], axis=-1)
pairs_idx = _replace_labels(pairs, pd.Series(np.arange(len(x)), index=x))
result = np.zeros(x.shape*2, dtype=np.int32)
result[pairs_idx[:, 0], pairs_idx[:, 1]] += 1
result[pairs_idx[:, 1], pairs_idx[:, 0]] += 1
    result //= 2
return pd.DataFrame(result, index=x, columns=x)
@profile
def _remap_labels(labels, lmap):
"""
Remaps labels to new values given a mapping l --> l'.
Parameters
----------
labels : np.array, shape (M,N)
A map of integer labels.
lmap : function or pd.Series or dict
If function, lmap(l) must return the new label.
If pd.Series or dict, lmap[l] must return the new label.
Returns
-------
np.array, shape (M,N)
The new label map.
"""
if callable(lmap):
indexer = np.array([lmap(i) for i in range(labels.min(),
labels.max() + 1)])
else:
_lmap = lmap.copy()
if type(_lmap) is dict:
_lmap = pd.Series(_lmap)
# pad lmap:
fill_index = np.arange(labels.min(), labels.max() + 1)
replace = pd.Series(fill_index, index=fill_index).copy()
replace[_lmap.index] = _lmap.values
indexer = np.array([replace[i] for i in range(labels.min(),
labels.max() + 1)])
return indexer[(labels - labels.min())]
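# Illustrative usage sketch: a hypothetical call to `_remap_labels` with a dict
# mapping; the label values are made up for illustration.
def _remap_labels_example():
    labels = np.array([[1, 1, 2],
                       [2, 3, 3]])
    remapped = _remap_labels(labels, {2: 5})
    # Label 2 should become 5 while 1 and 3 stay unchanged:
    # [[1, 1, 5], [5, 3, 3]]
    print(remapped)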
@profile
def merge_connected(labels, connected, touch=False):
"""
Given a labeled map and a label connectivity matrix, merge connected
labels.
Parameters
----------
labels : np.array, shape (M,N)
A map of integer labels.
connected : pd.DataFrame, shape (L,L)
A DataFrame where index and columns are the unique labels and
connected.loc[i,j] == True iff labels i and j are connected wrt.
any measure.
touch : bool, optional
If True, only merge labels that share an edge. (default: False)
Returns
-------
np.array, shape (M,N)
A new map of labels.
"""
x = connected.index
if touch:
nm = neighbor_matrix(labels, touch=True, bg=False)
merge = np.logical_and(connected, nm)
else:
merge = connected
csr = csr_matrix(merge)
cc = csgraph.connected_components(csr, directed=False)
replace = | pd.Series(cc[1], index=x) | pandas.Series |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = | Index(['A','B','A','C']) | pandas.core.index.Index |
"""
This code is copied from Philippjfr's notebook:
https://anaconda.org/philippjfr/sankey/notebook
"""
from functools import cmp_to_key
import holoviews as hv
import numpy as np
import pandas as pd
import param
from bokeh.models import Patches
from holoviews import Operation
from holoviews.core.util import basestring, max_range
from holoviews.element.graphs import Graph, Nodes, EdgePaths, Dataset, redim_graph
from holoviews.plotting.bokeh import GraphPlot
class Sankey(Graph):
group = param.String(default='Sankey', constant=True)
def __init__(self, data, kdims=None, vdims=None, compute=True, **params):
if isinstance(data, tuple):
data = data + (None,) * (3 - len(data))
edges, nodes, edgepaths = data
else:
edges, nodes, edgepaths = data, None, None
if nodes is not None:
if not isinstance(nodes, Dataset):
if nodes.ndims == 3:
nodes = Nodes(nodes)
else:
nodes = Dataset(nodes)
nodes = nodes.clone(kdims=nodes.kdims[0],
vdims=nodes.kdims[1:])
node_info = nodes
super(Graph, self).__init__(edges, kdims=kdims, vdims=vdims, **params)
if compute:
self._nodes = nodes
chord = layout_sankey(self)
self._nodes = chord.nodes
self._edgepaths = chord.edgepaths
self._sankey = chord._sankey
else:
if not isinstance(nodes, Nodes):
raise TypeError("Expected Nodes object in data, found %s."
% type(nodes))
self._nodes = nodes
if not isinstance(edgepaths, EdgePaths):
raise TypeError("Expected EdgePaths object in data, found %s."
% type(edgepaths))
self._edgepaths = edgepaths
self._sankey = None
self._validate()
self.redim = redim_graph(self, mode='dataset')
class SankeyPlot(GraphPlot):
label_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node labels will be drawn""")
filled = True
_style_groups = dict(GraphPlot._style_groups, quad='nodes', text='label')
_draw_order = ['patches', 'multi_line', 'text', 'quad']
style_opts = GraphPlot.style_opts + ['edge_fill_alpha', 'nodes_line_color', 'label_text_font_size']
def _init_glyphs(self, plot, element, ranges, source):
ret = super(SankeyPlot, self)._init_glyphs(plot, element, ranges, source)
renderer = plot.renderers.pop(plot.renderers.index(self.handles['glyph_renderer']))
plot.renderers = [renderer] + plot.renderers
return ret
def get_data(self, element, ranges, style):
data, mapping, style = super(SankeyPlot, self).get_data(element, ranges, style)
quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
quad_data = data['scatter_1']
quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
for node in element._sankey['nodes']:
quad_data['x0'].append(node['x0'])
quad_data['y0'].append(node['y0'])
quad_data['x1'].append(node['x1'])
quad_data['y1'].append(node['y1'])
data['quad_1'] = quad_data
quad_mapping['fill_color'] = mapping['scatter_1']['node_fill_color']
mapping['quad_1'] = quad_mapping
style['nodes_line_color'] = 'black'
lidx = element.nodes.get_dimension(self.label_index)
if lidx is None:
if self.label_index is not None:
dims = element.nodes.dimensions()[2:]
self.warning("label_index supplied to Chord not found, "
"expected one of %s, got %s." %
(dims, self.label_index))
return data, mapping, style
if element.vdims:
edges = Dataset(element)[element[element.vdims[0].name] > 0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
else:
nodes = element
labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
ys = nodes.dimension_values(1)
nodes = element._sankey['nodes']
offset = (nodes[0]['x1'] - nodes[0]['x0']) / 4.
xs = np.array([node['x1'] for node in nodes])
data['text_1'] = dict(x=xs + offset, y=ys, text=[str(l) for l in labels])
mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align='left')
return data, mapping, style
def get_extents(self, element, ranges):
"""
        Pad the x/y extents of the Sankey layout, leaving extra horizontal
        room for node labels when label_index is set.
"""
xdim, ydim = element.nodes.kdims[:2]
xpad = .05 if self.label_index is None else 0.25
x0, x1 = ranges[xdim.name]
y0, y1 = ranges[ydim.name]
xdiff = (x1 - x0)
ydiff = (y1 - y0)
x0, x1 = max_range([xdim.range, (x0 - (0.05 * xdiff), x1 + xpad * xdiff)])
y0, y1 = max_range([ydim.range, (y0 - (0.05 * ydiff), y1 + (0.05 * ydiff))])
return (x0, y0, x1, y1)
def _postprocess_hover(self, renderer, source):
if self.inspection_policy == 'edges':
if not isinstance(renderer.glyph, Patches):
return
else:
if isinstance(renderer.glyph, Patches):
return
super(SankeyPlot, self)._postprocess_hover(renderer, source)
def weightedSource(link):
return nodeCenter(link['source']) * link['value']
def weightedTarget(link):
return nodeCenter(link['target']) * link['value']
def nodeCenter(node):
return (node['y0'] + node['y1']) / 2
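# Comparator helpers ported from d3-sankey; the bitwise `|` below appears to
# stand in for JavaScript's `||` fallback in the original (compare by breadth
# first, then break ties by index).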
def ascendingBreadth(a, b):
return int(a['y0'] - b['y0'])
def ascendingSourceBreadth(a, b):
return ascendingBreadth(a['source'], b['source']) | a['index'] - b['index']
def ascendingTargetBreadth(a, b):
return ascendingBreadth(a['target'], b['target']) | a['index'] - b['index']
def quadratic_bezier(start, end, c0=(0, 0), c1=(0, 0), steps=25):
"""
    Sample a Bezier spline between the start and end coordinates using two
    control points (the cubic Bezier formula, despite the function name).
"""
steps = np.linspace(0, 1, steps)
sx, sy = start
ex, ey = end
cx0, cy0 = c0
cx1, cy1 = c1
xs = ((1 - steps) ** 3 * sx + 3 * ((1 - steps) ** 2) * steps * cx0 +
3 * (1 - steps) * steps ** 2 * cx1 + steps ** 3 * ex)
ys = ((1 - steps) ** 3 * sy + 3 * ((1 - steps) ** 2) * steps * cy0 +
3 * (1 - steps) * steps ** 2 * cy1 + steps ** 3 * ey)
return np.column_stack([xs, ys])
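# Illustrative usage sketch: a quick, hypothetical sanity check that the
# sampled curve starts and ends at the supplied coordinates; the control
# points chosen here are arbitrary.
def _bezier_example():
    pts = quadratic_bezier((0, 0), (1, 0), c0=(0, 1), c1=(1, 1), steps=25)
    assert np.allclose(pts[0], (0, 0)) and np.allclose(pts[-1], (1, 0))
    return pts  # 25 (x, y) samples along the curve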
class layout_sankey(Operation):
"""
Computes a Sankey diagram from a Graph element.
Adapted from d3-sankey under BSD-3 license.
"""
bounds = param.NumericTuple(default=(0, 0, 1000, 500))
node_width = param.Number(default=15)
node_padding = param.Integer(default=10)
iterations = param.Integer(32)
def _process(self, element, key=None):
graph = {'nodes': [], 'links': []}
self.computeNodeLinks(element, graph)
self.computeNodeValues(graph)
self.computeNodeDepths(graph)
self.computeNodeBreadths(graph)
self.computeLinkBreadths(graph)
paths = []
for link in graph['links']:
source, target = link['source'], link['target']
x0, y0 = source['x1'], link['y0']
x1, y1 = target['x0'], link['y1']
start = np.array([(x0, link['width'] + y0),
(x0, y0)])
src = (x0, y0)
ctr1 = ((x0 + x1) / 2., y0)
ctr2 = ((x0 + x1) / 2., y1)
tgt = (x1, y1)
bottom = quadratic_bezier(src, tgt, ctr1, ctr2)
mid = np.array([(x1, y1),
(x1, y1 + link['width'])])
xmid = (x0 + x1) / 2.
y0 = y0 + link['width']
y1 = y1 + link['width']
src = (x1, y1)
ctr1 = (xmid, y1)
ctr2 = (xmid, y0)
tgt = (x0, y0)
top = quadratic_bezier(src, tgt, ctr1, ctr2)
spline = np.concatenate([start, bottom, mid, top])
paths.append(spline)
node_data = []
for node in graph['nodes']:
node_data.append((np.mean([node['x0'], node['x1']]),
np.mean([node['y0'], node['y1']]),
node['index']) + tuple(node['values']))
nodes = Nodes(node_data, vdims=element.nodes.vdims)
edges = EdgePaths(paths)
sankey = Sankey((element.data, nodes, edges), compute=False)
sankey._sankey = graph
return sankey
def computeNodeLinks(self, element, graph):
"""
Populate the sourceLinks and targetLinks for each node.
Also, if the source and target are not objects, assume they are indices.
"""
index = element.nodes.kdims[-1]
node_map = {}
values = element.nodes.array(element.nodes.vdims)
for node, vals in zip(element.nodes.dimension_values(index), values):
node = {'index': node, 'sourceLinks': [], 'targetLinks': [], 'values': vals}
graph['nodes'].append(node)
node_map[node['index']] = node
links = [element.dimension_values(d) for d in element.dimensions()[:3]]
for i, (src, tgt, value) in enumerate(zip(*links)):
source, target = node_map[src], node_map[tgt]
link = dict(index=i, source=source, target=target, value=value)
graph['links'].append(link)
source['sourceLinks'].append(link)
target['targetLinks'].append(link)
def computeNodeValues(self, graph):
"""
Compute the value (size) of each node by summing the associated links.
"""
for node in graph['nodes']:
source_val = np.sum([l['value'] for l in node['sourceLinks']])
target_val = np.sum([l['value'] for l in node['targetLinks']])
node['value'] = max([source_val, target_val])
def computeNodeDepths(self, graph):
"""
Iteratively assign the depth (x-position) for each node.
Nodes are assigned the maximum depth of incoming neighbors plus one;
nodes with no incoming links are assigned depth zero, while
nodes with no outgoing links are assigned the maximum depth.
"""
nodes = graph['nodes']
depth = 0
while nodes:
next_nodes = []
for node in nodes:
node['depth'] = depth
for link in node['sourceLinks']:
if link['target'] not in next_nodes:
next_nodes.append(link['target'])
nodes = next_nodes
depth += 1
nodes = graph['nodes']
depth = 0
while nodes:
next_nodes = []
for node in nodes:
node['height'] = depth
for link in node['targetLinks']:
if link['source'] not in next_nodes:
next_nodes.append(link['source'])
nodes = next_nodes
depth += 1
x0, _, x1, _ = self.p.bounds
dx = self.p.node_width
kx = (x1 - x0 - dx) / (depth - 1)
for node in graph['nodes']:
d = node['depth'] if node['sourceLinks'] else depth - 1
node['x0'] = x0 + max([0, min([depth - 1, np.floor(d)]) * kx])
node['x1'] = node['x0'] + dx
def computeNodeBreadths(self, graph):
node_map = hv.OrderedDict()
for n in graph['nodes']:
if n['x0'] not in node_map:
node_map[n['x0']] = []
node_map[n['x0']].append(n)
_, y0, _, y1 = self.p.bounds
py = self.p.node_padding
def initializeNodeBreadth():
kys = []
for nodes in node_map.values():
nsum = np.sum([node['value'] for node in nodes])
ky = (y1 - y0 - (len(nodes) - 1) * py) / nsum
kys.append(ky)
ky = np.min(kys)
for nodes in node_map.values():
for i, node in enumerate(nodes):
node['y0'] = i
node['y1'] = i + node['value'] * ky
for link in graph['links']:
link['width'] = link['value'] * ky
def relaxLeftToRight(alpha):
for nodes in node_map.values():
for node in nodes:
if not node['targetLinks']:
continue
weighted = sum([weightedSource(l) for l in node['targetLinks']])
tsum = sum([l['value'] for l in node['targetLinks']])
center = nodeCenter(node)
dy = (weighted / tsum - center) * alpha
node['y0'] += dy
node['y1'] += dy
def relaxRightToLeft(alpha):
for nodes in list(node_map.values())[::-1]:
for node in nodes:
if not node['sourceLinks']:
continue
weighted = sum([weightedTarget(l) for l in node['sourceLinks']])
tsum = sum([l['value'] for l in node['sourceLinks']])
center = nodeCenter(node)
dy = (weighted / tsum - center) * alpha
node['y0'] += dy
node['y1'] += dy
def resolveCollisions():
for nodes in node_map.values():
y = y0
n = len(nodes)
nodes.sort(key=cmp_to_key(ascendingBreadth))
for node in nodes:
dy = y - node['y0']
if dy > 0:
node['y0'] += dy
node['y1'] += dy
y = node['y1'] + py
dy = y - py - y1
if dy > 0:
node['y0'] -= dy
node['y1'] -= dy
y = node['y0']
for node in nodes[:-1][::-1]:
dy = node['y1'] + py - y;
if dy > 0:
node['y0'] -= dy
node['y1'] -= dy
y = node['y0']
initializeNodeBreadth()
resolveCollisions()
alpha = 1
for _ in range(self.p.iterations):
alpha = alpha * 0.99
relaxRightToLeft(alpha)
resolveCollisions()
relaxLeftToRight(alpha)
resolveCollisions()
def computeLinkBreadths(self, graph):
for node in graph['nodes']:
node['sourceLinks'].sort(key=cmp_to_key(ascendingTargetBreadth))
node['targetLinks'].sort(key=cmp_to_key(ascendingSourceBreadth))
for node in graph['nodes']:
y0 = y1 = node['y0']
for link in node['sourceLinks']:
link['y0'] = y0
y0 += link['width']
for link in node['targetLinks']:
link['y1'] = y1
y1 += link['width']
# Register Sankey with holoviews
hv.Store.register({Sankey: SankeyPlot}, 'bokeh')
# Convenience function for adding links
def make_links(df, groups):
"""
Makes links given a set of groups and a dataframe
:param pd.DataFrame df: Input dataframe containing groups
:param list groups: List of groups to link
:return: DataFrame of links
:rtype: pd.DataFrame
"""
links = []
    for i in range(len(groups) - 1):
links.extend(_add_links(df.groupby(groups[i])[groups[i + 1]].value_counts().iteritems()))
return pd.DataFrame(links)
def _add_links(iteritems):
links = []
type_count = 0
current_type = None
for pair, count in iteritems:
source, target = pair
# Track type by grouping samples by "source"
if source != current_type:
current_type = source
type_count += 1
links.append({'source': source, 'target': target, 'value': count})#, 'type': type_count})
return links
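# Illustrative usage sketch: a tiny, hypothetical input for `make_links`; the
# column names and values are made up, and the call assumes a pandas version
# that still provides Series.iteritems() (removed in pandas 2.0).
def _make_links_example():
    df = pd.DataFrame({'Tissue': ['Brain', 'Brain', 'Liver'],
                       'Type': ['Tumor', 'Normal', 'Tumor']})
    links = make_links(df, ['Tissue', 'Type'])
    # Expect one row per (source, target) pair with its count, e.g.
    # Brain->Tumor: 1, Brain->Normal: 1, Liver->Tumor: 1.
    return links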
def sankey_tissue(df, tissues, groups):
"""Sankey Diagram subset by tissue"""
sub = df[df.Tissue.isin(tissues)]
links = make_links(sub, groups)
return Sankey(( | pd.DataFrame(links) | pandas.DataFrame |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
| tm.assert_index_equal(result, exp, exact=True) | pandas.util.testing.assert_index_equal |
import logging
import os
import pickle
import numpy as np
import pandas as pd
import pika
import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration
sentry_logging = LoggingIntegration(
level=logging.INFO, # Capture info and above as breadcrumbs
event_level=logging.ERROR # Send errors as events
)
sentry_sdk.init(
dsn=os.environ.get('SENTRY_DSN'),
integrations=[sentry_logging]
)
def on_request(ch, method, props, body):
body = pickle.loads(body)
response = model.predict(body.get('popularity_level'), body['user_music'])
body['recommendations'] = response
if props.reply_to and props.correlation_id:
channel.basic_publish(exchange='',
routing_key=props.reply_to,
body=pickle.dumps(body),
properties=pika.BasicProperties(
correlation_id=props.correlation_id),
)
logger.info(f'Predictions sent to web_server')
else:
channel.basic_publish(exchange='',
routing_key='tg_bot_queue',
body=pickle.dumps(body),
properties=pika.BasicProperties(),
)
logger.info(f'Predictions sent to tg_bot')
ch.basic_ack(delivery_tag=method.delivery_tag)
class Recommender(object):
def __init__(self, n_recommendations=5, popularity_level=5):
with open('../data/model_w2v.pkl', 'rb') as f:
self.model = pickle.load(f)
self.popularity = {}
for singer in self.model.wv.vocab.keys():
self.popularity[singer] = self.model.wv.vocab[singer].count
self.n_recommendations = n_recommendations
self.popularity_level = popularity_level
def _pick_random_items(self, items, scores, n):
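        # Shift scores so the minimum is ~0, square them to sharpen the
        # contrast, normalise to probabilities, then sample without
        # replacement: higher-scored items are favoured but not guaranteed.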
scores -= scores.min() - 1e-10
scores = scores ** 2
scores /= np.sum(scores)
chosen_items = np.random.choice(items, size=min(n, len(scores)), replace=False, p=scores)
return chosen_items.astype(int).tolist()
def predict(self, popularity_level=None, user_music='Nothing'):
if popularity_level is None:
popularity_level = self.popularity_level
logger.info(f'New recommendation request for user, popularity level: {popularity_level}')
if user_music == 'Nothing':
logger.info(f'User closed access to music')
return 'Sorry, you closed access to your music collection.'
if len(user_music) == 0:
logger.warning('Wrong user id or no music in collection.')
return 'No such user or empty music collection.'
user_music = [word.lower().strip() for word in user_music
if not word.replace(':', '').isnumeric()
and len(word.strip()) > 0
]
logger.info(f'Got {len(user_music)} artists from parser.')
recs = self.model.predict_output_word(user_music, 100)
if recs is None:
logger.warning('user with empty recommendations')
return 'It seems you like something too out of Earth.'
recs = | pd.DataFrame(recs, columns=['band', 'relevance']) | pandas.DataFrame |
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
if start_date != None and end_date != None:
start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
elif start_date != None and end_date == None:
start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
elif start_date == None and end_date != None:
start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
def rename_mvs_id_column(col, names, types):
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
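# Illustrative usage sketch: hypothetical inputs showing what the three helpers
# above return; the mvs_ids, sensor names and type codes are made up for
# illustration.
def _query_helper_examples():
    print(return_between_date_query_string('2019-01-01', None))
    # -> "AND [TimeStampLocal] >= '2019-01-01' "
    print(sql_list_from_mvs_ids([101, 102]))
    # -> "(101_1),(102_1)"
    print(rename_mvs_id_column('101_1', names={101: 'WS_80m'}, types={'1': 'avg'}))
    # -> "WS_80m_avg"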
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = | pd.read_sql(sql_query_coordinates, self.conn) | pandas.read_sql |
from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
import numpy as np
####################################
# Change these two and let the magic begin
name = 'MASS' # MASS gives a lot
r_or_python = 'r' # choose 'r' or 'python'
####################################
def f7(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
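# f7() is an order-preserving de-duplication helper, e.g.
# f7(['MASS', 'lattice', 'MASS', 'nlme']) -> ['MASS', 'lattice', 'nlme'].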
names = [name]
dependency_rank = []
impact_score = []
number_reused_by = []
number_contributors = []
number_downloads = []
number_citations = []
hood_size = []
number_commits = []
is_academic = []
contrib_rank = []
all_tags = []
info = []
# This block of code gets the text mining started
r = urllib.request.urlopen('http://depsy.org/api/package/'+r_or_python+'/'+name).read()
soup = BeautifulSoup(r)
body1 = soup.find('body')
body = str(body1).split('\n')
# This block of code gets the names of dependent packages (only in r/python)
reused_by1 = [i - 1 for i, x in enumerate(body) if 'neighborhood_size' in x]
reused_by2 = reused_by1[1:len(reused_by1)]
reused_name = [body[x][21:len(body[x])-3] for x in reused_by2]
# This block of code gets the info about the node package
root_idx = [i for i, x in enumerate(body) if '"name": "'+name+'"' in x][-1]
k = 0
if (body[1].find(']') > 0):
k = 1
num_contrib = float('nan')
else:
num_contrib = int(body[root_idx+5].split(' ')[5][:-1])
if (body[root_idx+3].split(' ')[-2][0:-1] == 'null'):
commits = float('nan')
else:
commits = int(body[root_idx+3].split(' ')[-2][0:-1])
num_reused_by = int(body[root_idx-3].split(' ')[5][:-3])
impact = float(body[root_idx-5].split(' ')[5][:-1])
downloads = int(body[root_idx+14-k].split(' ')[-1])
citations = int(body[root_idx+22-k].split(' ')[-1])
depend_rank = float(body[root_idx+30-k].split(' ')[-1])
neighborhood_size = float(body[root_idx+1].split(' ')[-2][0:-1])
academic = bool(body[root_idx-2].split(' ')[-2][0:-1])
# Gets the weighted contributor score -- calculated by: the product of
# contributor impact and their credit percentage for the package, summed over all contributors
if (body[1].find('"all_contribs": [],') > -1):
weighted_contributor_value = float('nan')
else:
contrib_impact = [float(body[i-5].split(' ')[-2][0:-1]) for i, x in enumerate(body) if '"person_name":' in x][0:num_contrib]
contrib_weight = [float(body[i+1].split(' ')[-2][0:-1]) for i, x in enumerate(body) if '"person_name":' in x][0:num_contrib]
weighted_contributor_value = np.dot(contrib_impact,contrib_weight)
tags = []
i = 1
while(not ('top_contribs' in body[root_idx+34+i] or ']' in body[root_idx+34+i])):
tags.append([x for x in body[root_idx+34+i].split(' ') if x != ''][0].replace(',','')[1:-1])
i = i + 1
# stores everything
info.append([name,r_or_python, depend_rank,citations,impact,num_reused_by,num_contrib, downloads,reused_name,tags, neighborhood_size, commits, academic])
[names.append(x) for x in reused_name]
names = f7(names)
dependency_rank.append(depend_rank)
impact_score.append(impact)
number_reused_by.append(num_reused_by)
number_contributors.append(num_contrib)
number_downloads.append(downloads)
number_citations.append(citations)
hood_size.append(neighborhood_size)
number_commits.append(commits)
is_academic.append(academic)
contrib_rank.append(weighted_contributor_value)
all_tags.append(tags)
# iterate through the dependencies
j = 1
while(not j == len(names)):
name = names[j]
print(j,name)
# This block of code gets the text mining started
    r = urllib.request.urlopen('http://depsy.org/api/package/'+r_or_python+'/'+name).read()
soup = BeautifulSoup(r)
body1 = soup.find('body')
body = str(body1).split('\n')
# This block of code gets the names of dependent packages (only in r/python)
reused_by1 = [i - 1 for i, x in enumerate(body) if 'neighborhood_size' in x]
reused_by2 = reused_by1[1:len(reused_by1)]
reused_name = [body[x][21:len(body[x])-3] for x in reused_by2]
# This block of code gets the info about the node package
root_idx = [i for i, x in enumerate(body) if '"name": "'+name+'"' in x][-1]
k = 0
if (body[1].find(']') > 0):
k = 1
num_contrib = float('nan')
else:
num_contrib = int(body[root_idx+5].split(' ')[5][:-1])
if (body[root_idx+3].split(' ')[-2][0:-1] == 'null'):
commits = float('nan')
else:
commits = int(body[root_idx+3].split(' ')[-2][0:-1])
num_reused_by = int(body[root_idx-3].split(' ')[5][:-3])
impact = float(body[root_idx-5].split(' ')[5][:-1])
downloads = int(body[root_idx+14-k].split(' ')[-1])
citations = int(body[root_idx+22-k].split(' ')[-1])
depend_rank = float(body[root_idx+30-k].split(' ')[-1])
neighborhood_size = float(body[root_idx+1].split(' ')[-2][0:-1])
academic = bool(body[root_idx-2].split(' ')[-2][0:-1])
if (body[1].find('"all_contribs": [],') > -1):
weighted_contributor_value = float('nan')
else:
contrib_impact = [float(body[i-5].split(' ')[-2][0:-1]) for i, x in enumerate(body) if '"person_name":' in x][0:num_contrib]
contrib_weight = [float(body[i+1].split(' ')[-2][0:-1]) for i, x in enumerate(body) if '"person_name":' in x][0:num_contrib]
weighted_contributor_value = np.dot(contrib_impact,contrib_weight)
tags = []
i = 1
while(not ('top_contribs' in body[root_idx+34+i] or ']' in body[root_idx+34+i])):
tags.append([x for x in body[root_idx+34+i].split(' ') if x != ''][0].replace(',','')[1:-1])
i = i + 1
# stores everything
info.append([name,r_or_python, depend_rank,citations,impact,num_reused_by,num_contrib, downloads,reused_name,tags, neighborhood_size, commits, academic])
[names.append(x) for x in reused_name]
names = f7(names)
dependency_rank.append(depend_rank)
impact_score.append(impact)
number_reused_by.append(num_reused_by)
number_contributors.append(num_contrib)
number_downloads.append(downloads)
number_citations.append(citations)
hood_size.append(neighborhood_size)
number_commits.append(commits)
is_academic.append(academic)
contrib_rank.append(weighted_contributor_value)
all_tags.append(tags)
j = j + 1
## creates and edge list
Source = []
Target = []
for l in range(len(info)):
for m in range(len(info[l][8])):
Source.append(info[l][8][m])
Target.append(info[l][0])
# making the csv for the edge list
df = | pd.DataFrame({'Source': Source, 'Target': Target}) | pandas.DataFrame |
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from finmarketpy.economics.techindicator import TechParams, TechIndicator
tech_params = TechParams(fillna=True, atr_period=14, sma_period=3,
green_n=4, green_count=9, red_n=2, red_count=13)
tech_ind = TechIndicator()
dates = pd.date_range(start='1/1/2018', end='1/08/2018')
def get_cols_name(n):
return ['Asset%d.close' % x for x in range(1, n + 1)]
def test_sma():
indicator_name = 'SMA'
# Test Case 1: constant prices
cols = get_cols_name(1)
data_df = pd.DataFrame(index=dates, columns=cols, data=1)
tech_ind.create_tech_ind(data_df, indicator_name, tech_params)
expected_signal_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name, 'Signal'])
for col in cols], data=-1)
expected_df = pd.DataFrame(index=dates, columns=[' '.join([col, indicator_name])
for col in cols], data=1)
expected_signal_df.iloc[:tech_params.sma_period] = np.nan
expected_df.iloc[:tech_params.sma_period - 1] = np.nan
df = tech_ind.get_techind()
signal_df = tech_ind.get_signal()
| assert_frame_equal(df, expected_df) | pandas.testing.assert_frame_equal |
import pandas as pd
from prophet import Prophet
def prediction(pollutant, city, date):
pollutant_choice = pollutant + " AQI"
# read the csv file into a dataframe
df = pd.read_csv('pollution_us_2000_2016.csv')
# delete unnecessary data columns
df = df.drop(columns=['Unnamed: 0', 'NO2 Units', 'O3 Units', 'SO2 Units', 'CO Units'])
# delete duplicate data tuples
df.drop_duplicates(inplace=True)
# convert Date local to python date and time
df['date'] = | pd.to_datetime(df['Date Local']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 14:04:13 2020
@authors: <NAME>, <NAME>
Huffman Encoding
"""
import pandas as pd
class Node:
def __init__(self, char=None, freq=None, node1=None, node2=None):
self.left = node1
self.right = node2
if(char==None):
self.char = node1.char+node2.char
self.freq = node1.freq+node2.freq
else:
self.char = char
self.freq = freq
def PrintTree(self, level=0):
dashes=""
for x in range(level):
dashes=dashes+"--"
print(dashes,self.char,":",self.freq)
level=level+1
if(self.left!=None):
self.left.PrintTree(level=level)
if(self.right!=None):
self.right.PrintTree(level=level)
def CreateHuffmanDict(self, level=0, code=""):
level=level+1
huffman_dict={}
if(self.left==None and self.right==None):
huffman_dict[self.char] = code
return huffman_dict
else:
if(self.left!=None):
huffman_dict_left=self.left.CreateHuffmanDict(level=level, code= code + "0")
huffman_dict.update(huffman_dict_left)
if(self.right!=None):
huffman_dict_right=self.right.CreateHuffmanDict(level=level, code=code + "1")
huffman_dict.update(huffman_dict_right)
return huffman_dict
def huffman_encode(huffman_dict,text):
encoded_text=""
for char in text:
# print(char)
# print(huffman_dict[char])
encoded_text=encoded_text + huffman_dict[char]
return encoded_text
def huffman_decode(root_node,encoded_text):
decoded_text=""
next_node=root_node
for bit in encoded_text:
if(next_node.left==None and next_node.right==None):
decoded_text=decoded_text + next_node.char
if(bit=="0"):
next_node=root_node.left
if(bit=="1"):
next_node=root_node.right
elif(bit=="0"):
next_node=next_node.left
else:
next_node=next_node.right
decoded_text=decoded_text + next_node.char
return decoded_text
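# Hedged usage sketch (not part of the original script): a tiny hand-built tree
# showing that huffman_decode reverses huffman_encode. It is only defined, never
# called, so it does not interfere with the script below.
def _huffman_roundtrip_demo():
    leaf_a = Node(char="a", freq=3)
    leaf_b = Node(char="b", freq=1)
    root = Node(node1=leaf_a, node2=leaf_b)      # internal node with char "ab"
    code_dict = root.CreateHuffmanDict()         # {'a': '0', 'b': '1'}
    encoded = huffman_encode(code_dict, "abba")  # "0110"
    assert huffman_decode(root, encoded) == "abba"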
#########START############
#Read original text from file
file_name="text1"
f = open("input/"+file_name+".txt", "r", encoding='utf8')
text=f.read()
f.close()
#Create a dataframe contains each unique char with frequency and node
char_list=[]
freq_list=[]
node_list=[]
unique_char_set=set(text)
freq_dict = {}
for char in unique_char_set:
freq_dict[char]=0
for char in text:
freq_dict[char]=freq_dict[char]+1
for key, value in freq_dict.items():
char_list.append(key)
freq_list.append(value)
node_list.append(Node(char=key,freq=value))
data_tuples = list(zip(char_list,freq_list,node_list))
huffman_df= pd.DataFrame(data_tuples, columns=['Char','Freq','Node'])
print(huffman_df)
#Create Huffman Tree from frequencies
while(len(huffman_df)>1):
huffman_df=huffman_df.sort_values(by=['Freq'])
merged_node = Node(node1=huffman_df['Node'].values[0],node2=huffman_df['Node'].values[1])
huffman_df = huffman_df.iloc[2:]
merged_node_tuples = (merged_node.char,merged_node.freq,merged_node)
merged_node_df= | pd.DataFrame([merged_node_tuples], columns=['Char','Freq','Node']) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provide the TimeSeries and TimeSeriesEvent classes.
The classes defined in this module are accessible directly from the toplevel
Kinetics Toolkit's namespace (i.e. ktk.TimeSeries, ktk.TimeSeriesEvent)
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2020 <NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import kineticstoolkit._repr
from kineticstoolkit.decorators import unstable, deprecated, directory
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pandas as pd
import limitedinteraction as li
from dataclasses import dataclass
import warnings
from ast import literal_eval
from copy import deepcopy
from typing import Dict, List, Tuple, Any, Union, Optional
import kineticstoolkit as ktk # For doctests
WINDOW_PLACEMENT = {'top': 50, 'right': 0}
def dataframe_to_dict_of_arrays(
dataframe: pd.DataFrame) -> Dict[str, np.ndarray]:
"""
Convert a pandas DataFrame to a dict of numpy ndarrays.
This function mirrors the dict_of_arrays_to_dataframe function. It is
mainly used by the TimeSeries.from_dataframe method.
Parameters
----------
    dataframe
The dataframe to be converted.
Returns
-------
Dict[str, np.ndarray]
Examples
--------
In the simplest case, each dataframe column becomes a dict key.
>>> df = pd.DataFrame([[0, 3], [1, 4], [2, 5]])
>>> df.columns = ['column1', 'column2']
>>> df
column1 column2
0 0 3
1 1 4
2 2 5
>>> data = dataframe_to_dict_of_arrays(df)
>>> data['column1']
array([0, 1, 2])
>>> data['column2']
array([3, 4, 5])
If the dataframe contains similar column names with indices in brackets
(for example, Forces[0], Forces[1], Forces[2]), then these columns are
combined in a single array.
>>> df = pd.DataFrame([[0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11]])
>>> df.columns = ['Forces[0]', 'Forces[1]', 'Forces[2]', 'Other']
>>> df
Forces[0] Forces[1] Forces[2] Other
0 0 3 6 9
1 1 4 7 10
2 2 5 8 11
>>> data = dataframe_to_dict_of_arrays(df)
>>> data['Forces']
array([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]])
>>> data['Other']
array([ 9, 10, 11])
"""
# Remove spaces in indexes between brackets
columns = dataframe.columns
new_columns = []
for i_column, column in enumerate(columns):
splitted = column.split('[')
if len(splitted) > 1: # There are brackets
new_columns.append(
splitted[0] + '[' + splitted[1].replace(' ', '')
)
else:
new_columns.append(column)
    dataframe.columns = new_columns
# Search for the column names and their dimensions
# At the end, we end with something like:
# dimensions['Data1'] = []
# dimensions['Data2'] = [[0], [1], [2]]
# dimensions['Data3'] = [[0,0],[0,1],[1,0],[1,1]]
dimensions = dict() # type: Dict[str, List]
for column in dataframe.columns:
splitted = column.split('[')
if len(splitted) == 1: # No brackets
dimensions[column] = []
else: # With brackets
key = splitted[0]
index = literal_eval('[' + splitted[1])
if key in dimensions:
dimensions[key].append(index)
else:
dimensions[key] = [index]
n_samples = len(dataframe)
# Assign the columns to the output
out = dict() # type: Dict[str, np.ndarray]
for key in dimensions:
if len(dimensions[key]) == 0:
out[key] = dataframe[key].to_numpy()
else:
highest_dims = np.max(np.array(dimensions[key]), axis=0)
columns = [
key + str(dim).replace(' ', '')
for dim in sorted(dimensions[key])
]
out[key] = dataframe[columns].to_numpy()
out[key] = np.reshape(
out[key], [n_samples] + (highest_dims + 1).tolist()
)
return out
def dict_of_arrays_to_dataframe(
dict_of_arrays: Dict[str, np.ndarray]) -> pd.DataFrame:
"""
Convert a dict of ndarray of any dimension to a pandas DataFrame.
This function mirrors the dataframe_to_dict_of_arrays function. It is
mainly used by the TimeSeries.to_dataframe method.
The rows in the output DataFrame correspond to the first dimension of the
numpy arrays.
- Vectors are converted to single-column DataFrames.
- 2-dimensional arrays are converted to multi-columns DataFrames.
- 3-dimensional (or more) arrays are also converted to DataFrames, but
indices in brackets are added to the column names.
Parameters
----------
dict_of_array
A dict that contains numpy arrays. Each array must have the same
first dimension's size.
Returns
-------
DataFrame
Example
-------
>>> data = dict()
>>> data['Forces'] = np.arange(12).reshape((4, 3))
>>> data['Other'] = np.arange(4)
>>> data['Forces']
array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
>>> data['Other']
array([0, 1, 2, 3])
>>> df = dict_of_arrays_to_dataframe(data)
>>> df
Forces[0] Forces[1] Forces[2] Other
0 0 1 2 0
1 3 4 5 1
2 6 7 8 2
3 9 10 11 3
It also works with higher dimensions:
>>> data = {'3d_data': np.arange(8).reshape((2, 2, 2))}
>>> data['3d_data']
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> df = dict_of_arrays_to_dataframe(data)
>>> df
3d_data[0,0] 3d_data[0,1] 3d_data[1,0] 3d_data[1,1]
0 0 1 2 3
1 4 5 6 7
"""
# Init
df_out = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
| tm.assert_series_equal(result_s_na, expected_s_na) | pandas.util.testing.assert_series_equal |
"""
We'll want to keep a history of all assets,
prices and portfolio positions for backtesting.
Here we keep this history in a data frame and visit
different objects to record their status.
"""
from abc import ABC, abstractmethod
from datetime import datetime
from weakref import WeakSet
import pandas as pd
import pxtrade
from pxtrade.assets import Asset, FxRate, Portfolio
class Visitor(ABC):
"""Our history instance can visit different objects
and record data relating to them.
"""
@abstractmethod
def visit(self, instance):
raise NotImplementedError() # pragma: no cover
class AssetVisitor(Visitor):
def visit(self, instance):
return [(instance.code, instance.local_value)]
class FxRateVisitor(Visitor):
def visit(self, instance):
return [(instance.pair, instance.rate)]
class PortfolioVisitor(Visitor):
def visit(self, instance):
portfolio = instance
portfolio_code = portfolio.code
rows = []
rows.append((portfolio_code, portfolio.value))
for asset in Asset.get_instances():
asset_code = asset.code
rows.append(
(
portfolio_code + "_" + asset_code,
portfolio.get_holding_units(asset_code),
)
)
return rows
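# Hedged sketch (not from the original module): illustrates the visitor idea
# described in the module docstring using a stand-in object; the real pxtrade
# Asset constructor is not shown here, so a minimal fake is used instead.
def _asset_visitor_demo():
    class _FakeAsset:
        code = "AAA"
        local_value = 100.0

    rows = AssetVisitor().visit(_FakeAsset())
    assert rows == [("AAA", 100.0)]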
class History:
instances = WeakSet()
def __init__(self, portfolios, *, backtest=None):
self.instances.add(self)
self._history = pd.DataFrame()
self._asset_visitor = AssetVisitor()
self._fx_rate_visitor = FxRateVisitor()
self._portfolio_visitor = PortfolioVisitor()
if isinstance(portfolios, Portfolio):
portfolios = [portfolios]
if not isinstance(portfolios, list):
raise TypeError("Expecting portfolio or list of portfolios.")
for portfolio in portfolios:
if not isinstance(portfolio, Portfolio):
raise TypeError("Expecting Portfolio instance.")
if backtest is not None:
if not isinstance(backtest, pxtrade.backtest.Backtest):
raise TypeError("Expecting Backtest instance.")
self._portfolios = portfolios
self._backtest = backtest
def _get_visitor(self, instance):
if isinstance(instance, Asset):
return self._asset_visitor
if isinstance(instance, FxRate):
return self._fx_rate_visitor
if isinstance(instance, Portfolio):
return self._portfolio_visitor
raise NotImplementedError(
"Unable to record history for " + instance.__class__.__name__
)
def take_snapshot(self, date_time):
if not isinstance(date_time, datetime):
raise TypeError("Expecting datetime instance.")
instances = list()
instances.extend(Asset.get_instances())
instances.extend(FxRate.get_instances())
instances.extend(self._portfolios)
get_visitor = self._get_visitor
snapshot = | pd.Series(dtype=object) | pandas.Series |
"""
**The Voyager class uses the executable file voyager.exe to launch CUBE scripts from the command line.**
Launching a CUBE script from the command line requires the voyager folder that contains voyager.exe to be on the
system (Windows) path, so you must add that folder to the system path before using this class.
If CUBE has been properly installed, this folder should be under Program Files, for example at:
*'C:\Program Files (x86)\Citilabs\CubeVoyager'*.
It may be added to your system path as follows:
* Control Panel\All Control Panel Items\System
* Advanced system settings
* Environment Variables
* User variables
* PATH (create PATH or add your voyager folder to its list)
example:
::
import pycube
voyager = pycube.voyager.Voyager(r'N:/python/voyager')
voyager.build_net(node_dataframe, link_dataframe , r"Q:\cube_network.net")
"""
import os
import pandas as pd
import shapely
from syspy.io.pandasdbf import pandasdbf
from syspy.io.pandasshp import pandasshp
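# Hedged helper sketch (not part of the original module): the docstring above
# requires the folder containing voyager.exe to be on the system PATH. This
# check relies on shutil.which, which returns None when the executable cannot
# be found on PATH.
def _voyager_on_path():
    import shutil
    return shutil.which('voyager.exe') is not None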
class Voyager:
def __init__(self, environment):
self.environment = environment
def mat_to_dbf(
self,
input_matrix,
output_dbf,
fields=None,
n_tab=1,
debug=False
):
"""
creates a dbf from a cube matrix,
requires fields OR n_tab = len(fields)
:param input_matrix: path to a cube matrix (.mat)
dont forget .mat in the name !
:type input_matrix: str
:param output_dbf: path to the dbf to create
:type output_dbf: str
:param fields: list of the fields of the input matrix
:type fields: list
:param n_tab: number of tabs of the matrix
(required if the fields are not provided)
:type n_tab: int
:param debug: switch to manual control of the script launcher if True
:type debug: bool
:return: None
"""
script_text = r"""
RUN PGM=MATRIX PRNFILE="format_env\mat_to_dbf.prn" MSG='mat_to_dbf'
FILEI MATI[1] = filei_mati
FILEO RECO[1] = fileo_reco,
FIELDS = I, J, field_names
JLOOP
RO.I=I
RO.J=J
rec_in_jloop
WRITE RECO = 1
ENDJLOOP
ENDRUN
"""
if not fields:
tabs = ['tab_%i' % (i + 1) for i in range(n_tab)]
fields = tabs
else:
n_tab = len(fields)
field_names = ', '.join(fields)
filei_mati = '"%s"' % input_matrix
fileo_reco = '"%s"' % output_dbf
rec_in_jloop = ' '.join(['RO.%s = MI.1.%s \n' % (fields[i], i + 1) for i in range(n_tab)])
# creating a cube script
script = open(self.environment + r'\mat_to_dbf.s', 'w', encoding='latin')
script.write(script_text.replace(
'format_env', self.environment).replace(
'filei_mati', filei_mati).replace(
'fileo_reco', fileo_reco).replace(
'field_names', field_names).replace(
'rec_in_jloop', rec_in_jloop))
script.close()
# runs the script with voyager.exe
options = """/Start /CloseWhenDone /Minimize /NoSplash""" if not debug else ""
os.system('voyager.exe "' + self.environment + r'\mat_to_dbf.s" ' + options)
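    # Hedged usage sketch (the paths and field names below are illustrative
    # assumptions, not taken from the original code):
    #   voyager = Voyager(r'N:/python/voyager')
    #   voyager.mat_to_dbf(r'Q:\demand.mat', r'Q:\demand.dbf',
    #                      fields=['car', 'transit'])
    # writes one dbf record per (I, J) pair, with one column per matrix tab.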
def mat_to_csv(
self,
input_matrix,
output_csv,
fields=None,
n_tab=1,
debug=False,
i='origin',
j='destination'
):
"""
creates a csv from a cube matrix, requires fields OR n_tab = len(fields)
:param input_matrix: path to a cube matrix (.mat)
:type input_matrix: str
:param output_csv: path to the csv to create ()
:type output_csv: str
:param fields: list of the fields of the input matrix
:type fields: list
:param n_tab: number of tabs of the matrix (required if the fields are not provided)
:type n_tab: int
:param debug: switch to manual control of the script launcher if True
:type debug: bool
:return: None
"""
script_text = r"""
RUN PGM=MATRIX PRNFILE="format_env\mat_to_csv.prn" MSG='mat_to_csv'
FILEI MATI[1] = filei_mati
FILEO PRINTO[1] = fileo_printo
print_headers
JLOOP
print_in_jloop
ENDJLOOP
ENDRUN
"""
if fields is None:
tabs = ['tab_%i' % (i + 1) for i in range(n_tab)]
fields = tabs
else:
n_tab = len(fields)
field_names = ', '.join(fields)
filei_mati = '"%s"' % input_matrix
fileo_printo = '"%s"' % output_csv
print_headers = 'IF (I = 1) \n PRINT LIST ="' + '" ,";" ,"'.join([i, j] + fields) + '" PRINTO = 1 \n ENDIF'
print_assignation = ' '.join(['%s = MI.1.%s \n' % (fields[i].replace(' ', '_'), i + 1) for i in range(n_tab)])
print_statement = 'PRINT LIST = I, ";", J, ";", ' + ',";",'.join([f.replace(' ', '_') for f in fields]) + ' PRINTO = 1'
print_in_jloop = print_assignation + ' \n' + print_statement
# creating a cube script
script = open(self.environment + r'\mat_to_csv.s', 'w', encoding='latin')
script.write(script_text.replace(
'format_env', self.environment).replace(
'filei_mati', filei_mati).replace(
'fileo_printo', fileo_printo).replace(
'field_names', field_names).replace(
'print_in_jloop', print_in_jloop).replace('print_headers', print_headers))
script.close()
# runs the script with voyager.exe
options = """/Start /CloseWhenDone /Minimize /NoSplash""" if not debug else ""
os.system('voyager.exe "' + self.environment + r'\mat_to_csv.s" ' + options)
def net_to_dbf(self, input_network, output_links, output_nodes, debug=False):
"""
creates a dbf from a cube network
:param input_network: path to a cube network (.net)
:type input_network: str
:param output_links: path to the linkfile dbf create ()
:type output_links: str
:param output_nodes: path to the nodefile dbf to create ()
:type output_nodes: str
:param debug: switch to manual control of the script launcher if True
:type debug: bool
:return: None
"""
script_text = r"""
RUN PGM=NETWORK PRNFILE="%s\net_to_dbf.prn"
FILEI LINKI[1] = "%s"
FILEO LINKO = "%s"
FILEO NODEO = "%s"
ENDRUN
""" % (self.environment, input_network, output_links, output_nodes)
# creating a cube script
script = open(self.environment + r'\net_to_dbf.s', 'w', encoding='latin')
script.write(script_text)
script.close()
# runs the script with voyager.exe
options = """/Start /CloseWhenDone /Minimize /NoSplash""" if not debug else ""
os.system('voyager.exe "' + self.environment + r'\net_to_dbf.s" ' + options)
def build_net_from_links_shape(
self, links, output_network, first_node=0, length=False, debug=False,
add_symmetric=False, write_shp=False, shp_kwargs={}
):
name = output_network.replace('.net', '').replace('.NET', '')
links_to_shp = name + '_links.shp'
nodes_to_shp = name + '_nodes.shp'
links['coordinates_a'] = links['geometry'].apply(lambda c: c.coords[-1])
links['coordinates_b'] = links['geometry'].apply(lambda c: c.coords[0])
coordinate_list = list(set(list(links['coordinates_a'])).union(list(links['coordinates_b'])))
coordinate_dict = {first_node + i: coordinate_list[i] for i in range(len(coordinate_list))}
nodes = pd.DataFrame(pd.Series(coordinate_dict)).reset_index()
nodes.columns = ['n', 'coordinates']
links = pd.merge(links, nodes.rename(columns={'coordinates': 'coordinates_a'}), on='coordinates_a', how='left')
links = pd.merge(links, nodes.rename(columns={'coordinates': 'coordinates_b'}), on='coordinates_b', how='left',
suffixes=['_a', '_b'])
links.drop(['a', 'b', 'A', 'B', 'coordinates_a', 'coordinates_b'], axis=1, errors='ignore', inplace=True)
links.rename(columns={'n_a': 'a', 'n_b': 'b'}, inplace=True)
links = links.groupby(['a', 'b'], as_index=False).first()
links = pandasdbf.convert_stringy_things_to_string(links)
if length:
links[length] = links['geometry'].apply(lambda g: g.length)
if add_symmetric:
sym = links.copy()
sym['a'], sym['b'] = links['b'], links['a']
sym = sym[sym['a'] != sym['b']]
links = pd.concat([links, sym])
nodes['geometry'] = nodes['coordinates'].apply(shapely.geometry.point.Point)
if write_shp:
pandasshp.write_shp(nodes_to_shp, nodes, **shp_kwargs)
pandasshp.write_shp(links_to_shp, links, **shp_kwargs)
links.drop(['geometry'], axis=1, errors='ignore', inplace=True)
self.build_net(nodes[['n', 'geometry']], links.fillna(0), output_network, debug=debug)
def build_net(self, nodes, links, output_network, from_geometry=True, debug=False):
"""
creates a Cube .NET from links and nodes geoDataFrames
:param output_network: path to a cube network (.net)
:type output_network: str
:param nodes:
:type nodes: pd.DataFrame with geometry field
:param links:
:type links: pd.DataFrame
:param from_geometry: calculate x and y fields from a shapely geometry if True
:type debug: bool
:param debug: switch to manual control of the script launcher if True
:type debug: bool
:return: None
"""
_nodes = nodes.copy()
_links = links.copy()
if from_geometry:
_nodes[['x', 'y']] = _nodes['geometry'].apply(lambda g: | pd.Series([g.coords[0][0], g.coords[0][1]]) | pandas.Series |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
self.cache_field = cache_field
self.frame_attr = frame_attr
def __get__(self, obj, type=None):
return getattr(obj, self.cache_field, None)
def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
raise NotImplementedError
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
"""
Sparse version of Panel
Parameters
----------
frames : dict of DataFrame objects
items : array-like
major_axis : array-like
minor_axis : array-like
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
Notes
-----
"""
ndim = 3
_typ = 'panel'
_subtyp = 'sparse_panel'
def __init__(self, frames, items=None, major_axis=None, minor_axis=None,
default_fill_value=np.nan, default_kind='block',
copy=False):
if isinstance(frames, np.ndarray):
new_frames = {}
for item, vals in zip(items, frames):
new_frames[item] = \
SparseDataFrame(vals, index=major_axis,
columns=minor_axis,
default_fill_value=default_fill_value,
default_kind=default_kind)
frames = new_frames
if not isinstance(frames, dict):
raise TypeError('input must be a dict, a %r was passed' %
type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
# pre-filter, if necessary
if items is None:
items = Index(sorted(frames.keys()))
items = _ensure_index(items)
(clean_frames,
major_axis,
minor_axis) = _convert_frames(frames, major_axis,
minor_axis, kind=kind,
fill_value=fill_value)
self._frames = clean_frames
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
self.minor_axis = minor_axis
def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
def __array_wrap__(self, result):
return SparsePanel(result, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
@classmethod
def from_dict(cls, data):
"""
Analogous to Panel.from_dict
"""
return SparsePanel(data)
def to_dense(self):
"""
Convert SparsePanel to (dense) Panel
Returns
-------
dense : Panel
"""
return Panel(self.values, self.items, self.major_axis,
self.minor_axis)
def as_matrix(self):
return self.values
@property
def values(self):
# return dense values
return np.array([self._frames[item].values
for item in self.items])
# need a special property for items to make the field assignable
_items = None
def _get_items(self):
return self._items
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
raise NotImplementedError
# need to create new frames dict
old_frame_dict = self._frames
old_items = self._items
self._frames = dict((new_k, old_frame_dict[old_k])
for new_k, old_k in zip(new_items, old_items))
self._items = new_items
items = property(fget=_get_items, fset=_set_items)
# DataFrame's index
major_axis = SparsePanelAxis('_major_axis', 'index')
# DataFrame's columns / "items"
minor_axis = SparsePanelAxis('_minor_axis', 'columns')
def _ixs(self, i, axis=0):
"""
for compat as we don't support Block Manager here
i : int, slice, or sequence of integers
axis : int
"""
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if com.is_list_like(key):
return self.reindex(**{self._get_axis_name(axis): key})
return self.xs(key, axis=axis)
def _slice(self, slobj, axis=0, raise_on_error=False, typ=None):
"""
for compat as we don't support Block Manager here
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
return self.reindex(**{axis: index[slobj]})
def _get_item_cache(self, key):
return self._frames[key]
def __setitem__(self, key, value):
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
if not isinstance(value, SparseDataFrame):
value = value.to_sparse(fill_value=self.default_fill_value,
kind=self.default_kind)
else:
raise ValueError('only DataFrame objects can be set currently')
self._frames[key] = value
if key not in self.items:
self._items = Index(list(self.items) + [key])
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Panel
Returns
-------
panel : SparsePanel
"""
dense = self.to_dense().set_value(item, major, minor, value)
return dense.to_sparse(kind=self.default_kind,
fill_value=self.default_fill_value)
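    # Hedged usage note (labels are illustrative): as the docstring says,
    # set_value returns a new SparsePanel rather than mutating in place, so
    # rebind the result:
    #   sp = sp.set_value('ItemA', 'row1', 'col1', 2.5)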
def __delitem__(self, key):
loc = self.items.get_loc(key)
indices = lrange(loc) + lrange(loc + 1, len(self.items))
del self._frames[key]
self._items = self._items.take(indices)
def __getstate__(self):
# pickling
return (self._frames, com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value, self.default_kind)
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
def copy(self, deep=True):
"""
Make a copy of the sparse panel
Returns
-------
copy : SparsePanel
"""
d = self._construct_axes_dict()
if deep:
new_data = dict((k, v.copy(deep=True)) for k, v in | compat.iteritems(self._frames) | pandas.compat.iteritems |