| prompt | completion | api |
| --- | --- | --- |
| string (length 19 to 1.03M) | string (length 4 to 2.12k) | string (length 8 to 90) |
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import datetime as dt
import importlib
import json
import logging
import time
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.sql.sqltypes import TIMESTAMP, VARCHAR, FLOAT, INTEGER
import iotfunctions
from . import db as db_module
from .automation import (TimeSeriesGenerator, DateGenerator, MetricGenerator, CategoricalGenerator)
from .exceptions import StageException
from .pipeline import (CalcPipeline, DropNull, JobController, JobLogNull, Trace, AggregateItems)
from .stages import (DataReader, DataWriter, DataWriterFile)
from .util import (MemoryOptimizer, build_grouper, categorize_args, reset_df_index)
logger = logging.getLogger(__name__)
def retrieve_entity_type_metadata(raise_error=True, **kwargs):
"""
Get server metadata for entity type
"""
db = kwargs['_db']
# get kpi functions metadata
meta = db.http_request(object_type='engineInput', object_name=kwargs['logical_name'], request='GET',
raise_error=raise_error)
try:
meta = json.loads(meta)
except (TypeError, json.JSONDecodeError):
meta = None
if meta is None or 'exception' in meta:
raise RuntimeError(('API call to server did not retrieve valid entity '
'type properties for %s.' % kwargs['logical_name']))
if meta['kpiDeclarations'] is None:
meta['kpiDeclarations'] = []
logger.warning(('This entity type has no calculated kpis'))
# cache function catalog metadata in the db object
function_list = [x['functionName'] for x in meta['kpiDeclarations']]
db.load_catalog(install_missing=True, function_list=function_list)
# map server properties
params = {}
params['_entity_type_id'] = meta['entityTypeId']
params['_db_schema'] = meta['schemaName']
params['name'] = meta['metricsTableName']
params['_timestamp'] = meta['metricTimestampColumn']
params['_dimension_table_name'] = meta['dimensionsTable']
params['_data_items'] = meta['dataItems']
# constants
c_meta = db.http_request(object_type='constants', object_name=kwargs['logical_name'], request='GET')
try:
c_meta = json.loads(c_meta)
except (TypeError, json.JSONDecodeError):
logger.debug(('API call to server did not retrieve valid entity type'
' properties. No properties set.'))
else:
for p in c_meta:
key = p['name']
if isinstance(p['value'], dict):
params[key] = p['value'].get('value', p['value'])
else:
params[key] = p['value']
logger.debug('Retrieved server constant %s with value %s', key, params[key])
params = {**kwargs, **params}
return (params, meta)
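# Illustrative usage sketch only -- the Database construction and the 'robot'
# logical name below are assumptions, not values taken from this module. It
# shows how the helper above is typically driven: pass a db handle plus the
# logical entity name, get back merged run parameters and the raw server metadata.
#
#   db = db_module.Database(credentials=credentials)
#   params, meta = retrieve_entity_type_metadata(_db=db, logical_name='robot')
#   print(params['_entity_type_id'], params['_timestamp'])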
class EntityType(object):
"""
Data is organised around Entity Types. Entity Types have one or more
physical database objects for their data. When creating a new Entity Type,
it will attempt to connect itself to a table of the same name in the
database. If no table exists the Entity Type will create one.
Entity types describe the payload of an AS job. A job is built by a
JobController using functions metadata prepared by the Entity Type.
Metadata prepared is:
_functions:
List of function objects
_data_items:
List of data items and all of their metadata such as their
datatype.
_granularities_dict:
Dictionary keyed on granularity name. Contains a granularity object
that provides access to granularity metadata such as the time
level and other dimensions involved in the aggregation.
_schedules_dict:
Dictionary keyed on a schedule frequency containing other metadata
about the operations to be run at this frequency, e.g. how many days
should be backtracked when retrieving data.
Entity types may be initialized as client objects for local testing
or may be loaded from the server. After initialization all of the
above instance variables will be populated. The metadata looks the same
regardless of whether the entity type was loaded from the server
or initialized on the client. The logic to build the metadata is
different though.
Parameters
----------
name: str
Name of the entity type. Use lower case. Will be used as the physical
database table name, so don't use database reserved words or special
characters.
db: Database object
Contains the connection info for the database
*args:
Additional positional arguments are used to add the list of SQL Alchemy
Column objects contained within this table. Similar to the style of a
CREATE TABLE sql statement. There is no need to specify column names
if you are using an existing database table as an entity type.
**kwargs
Additional keyword args.
_timestamp: str
Override the timestamp column name from the default of 'evt_timestamp'
"""
is_entity_type = True
is_local = False
auto_create_table = True
aggregate_complete_periods = True # align data for aggregation with time grain to avoid partial periods
log_table = 'KPI_LOGGING' # deprecated, to be removed
checkpoint_table = 'KPI_CHECKPOINT' # deprecated, to be removed
chunk_size = None # use job controller default chunk
default_backtrack = None
trace_df_changes = True
drop_existing = False
# These two columns will be available in the dataframe of a pipeline
_entity_id = 'deviceid' # identify the instance
_timestamp_col = '_timestamp' # copy of the event timestamp from the index
# This column will identify an instance in the index
_df_index_entity_id = 'id'
# when automatically creating a new dimension, use this suffix
_auto_dim_suffix = '_auto_dim'
# when looking for an automatically created numeric index it should be named:
auto_index_name = '_auto_index_'
# constants declared as part of an entity type definition
ui_constants = None
_functions = None
# generator
_scd_frequency = '2D' # deprecated. Use parameters on EntityDataGenerator
_activity_frequency = '3D' # deprecated. Use parameters on EntityDataGenerator
_start_entity_id = 73000 # deprecated. Use parameters on EntityDataGenerator
_auto_entity_count = 5 # deprecated. Use parameters on EntityDataGenerator
# pipeline work variables stages
_dimension_table = None
_scd_stages = None
_custom_calendar = None
# variables that will be set when loading from the server
_entity_type_id = None
logical_name = None
_timestamp = 'evt_timestamp'
_dimension_table_name = None
_db_connection_dbi = None
_db_schema = None
_data_items = None
tenant_id = None
_entity_filter_list = None
_start_ts_override = None
_end_ts_override = None
_stages = None
_schedules_dict = None
_granularities_dict = None
_input_set = None
_output_list = None
_invalid_stages = None
_disabled_stages = None
# processing defaults
_pre_aggregate_time_grain = None # aggregate incoming data before processing
_auto_read_from_ts_table = True # read new data from designated time series table for the entity
_pre_agg_rules = None # pandas agg dictionary containing list of aggregates to apply for each item
_pre_agg_outputs = None # dictionary containing list of output items names for each item
_data_reader = DataReader
_abort_on_fail = False
_auto_save_trace = 30
save_trace_to_file = False
drop_null_class = DropNull
enable_downcast = False
allow_projection_list_trim = True
_write_usage = False
# deprecated class variables (to be removed)
_checkpoint_by_entity = True # manage a separate checkpoint for each entity instance
_is_initial_transform = True
_is_preload_complete = False
def __init__(self, name, db, *args, **kwargs):
logger.debug('Initializing new entity type using iotfunctions %s', iotfunctions.__version__)
try:
self.logical_name = kwargs.get('logical_name', None)
if self.logical_name is None:
self.logical_name = name
except AttributeError:
self.logical_name = name
if db is None:
name = 'None'
elif db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
self.name = name
self.description = kwargs.get('description', None)
if self.description is None:
self.description = ''
else:
del (kwargs['description'])
self.activity_tables = {}
self.scd = {}
self.db = db
if self.db is not None:
self.tenant_id = self.db.tenant_id
self._system_columns = [self._entity_id, self._timestamp_col, 'logicalinterface_id', 'devicetype', 'format',
'updated_utc', self._timestamp]
self._stage_type_map = self.default_stage_type_map()
self._custom_exclude_col_from_auto_drop_nulls = []
self._drop_all_null_rows = True
if self._scd_stages is None:
self._scd_stages = []
if self._data_items is None:
self._data_items = []
if self._granularities_dict is None:
self._granularities_dict = {}
# additional params set from kwargs
self.set_params(**kwargs)
# Start a trace to record activity on the entity type
self._trace = Trace(object_name=None, parent=self, db=db)
if self._disabled_stages is None:
self._disabled_stages = []
if self._invalid_stages is None:
self._invalid_stages = []
if len(self._disabled_stages) > 0 or len(self._invalid_stages) > 0:
self.trace_append(created_by=self, msg='Skipping disabled and invalid stages', log_method=logger.info,
**{'skipped_disabled_stages': [s['functionName'] for s in self._disabled_stages],
'skipped_disabled_data_items': [s['output'] for s in self._disabled_stages],
'skipped_invalid_stages': [s['functionName'] for s in self._invalid_stages],
'skipped_invalid_data_items': [s['output'] for s in self._invalid_stages]})
# attach to time series table
if self._db_schema is None:
logger.warning(('No _db_schema specified in **kwargs. Using'
' default database schema.'))
self._mandatory_columns = [self._timestamp, self._entity_id]
# separate args into categories
categories = [('constant', 'is_ui_control', None), ('granularity', 'is_granularity', None),
('function', 'is_function', None), ('column', None, Column)]
categorized = categorize_args(categories, 'functions', *args)
cols = list(categorized.get('column', []))
functions = list(categorized.get('function', []))
constants = list(categorized.get('constant', []))
grains = list(categorized.get('granularity', []))
if self.drop_existing and db is not None and not self.is_local:
self.drop_tables()
# create a database table if needed using cols
if name is not None and db is not None and not self.is_local:
try:
self.table = self.db.get_table(self.name, self._db_schema)
except KeyError:
if self.auto_create_table:
ts = db_module.TimeSeriesTable(self.name, self.db, *cols, **kwargs)
self.table = ts.table
# self.db.create()
msg = 'Create table %s' % self.name
logger.info(msg)
else:
msg = ('Database table %s not found. Unable to create'
' entity type instance. Provide a valid table name'
' or use the auto_create_table = True keyword arg'
' to create a table. ' % (self.name))
raise ValueError(msg)
# populate the data items metadata from the supplied columns
if isinstance(self._data_items, list) and len(self._data_items) == 0:
self._data_items = self.build_item_metadata(self.table)
else:
logger.warning((
'Created a logical entity type. It is not connected to a real database table, so it cannot perform any database operations.'))
# add granularities
for g in grains:
logger.debug('Adding granularity to entity type: %s', g.name)
self._granularities_dict[g.name] = g
# add constants
self.ui_constants = constants
self.build_ui_constants()
# _functions
# functions may have been provided as kwargs and may be included as args
# combine all
if self._functions is None:
self._functions = []
self._functions.extend(functions)
if name is not None and db is not None and not self.is_local:
db.entity_type_metadata[self.logical_name] = self
logger.debug(('Initialized entity type %s'), str(self))
def add_activity_table(self, name, activities, *args, **kwargs):
"""
add an activity table for this entity type.
parameters
----------
name: str
table name
activities: list of strs
activity type codes: these identify the nature of the activity, e.g. PM is Preventative Maintenance
*args: Column objects
other columns describing the activity, e.g. materials_cost
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to add activity tables ')
raise ValueError(msg)
kwargs['_activities'] = activities
kwargs['schema'] = self._db_schema
# name = name.lower()
if self.db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
table = db_module.ActivityTable(name, self.db, *args, **kwargs)
try:
sqltable = self.db.get_table(name, self._db_schema)
except KeyError:
table.create()
self.activity_tables[name] = table
def add_slowly_changing_dimension(self, property_name, datatype, **kwargs):
"""
add a slowly changing dimension table containing a single property for this entity type
parameters
----------
property_name : str
name of property, e.g. firmware_version (lower case, no database reserved words)
datatype: sqlalchemy datatype
"""
if self.db is None:
msg = ('Entity type has no db connection. Local entity types'
' are not allowed to add slowly changing dimensions ')
raise ValueError(msg)
property_name = property_name.lower()
name = '%s_scd_%s' % (self.name, property_name)
kwargs['schema'] = self._db_schema
if self.db.db_type == 'db2':
name = name.upper()
else:
name = name.lower()
table = db_module.SlowlyChangingDimension(name=name, database=self.db, property_name=property_name,
datatype=datatype, **kwargs)
try:
self.db.get_table(name, self._db_schema)
except KeyError:
table.create()
self.scd[property_name] = table
def _add_scd_pipeline_stage(self, scd_lookup):
self._scd_stages.append(scd_lookup)
def build_agg_dict_from_meta_list(self, meta_list):
agg_dict = OrderedDict()
input_items = set()
output_items = []
for f in meta_list:
input_item = f['input'].get('source')
output_item = f['output'].get('name')
aggregate = f['functionName']
try:
agg_dict[input_item].append(aggregate)
except KeyError:
agg_dict[input_item] = [aggregate]
input_items.add(input_item)
output_items.append(output_item)
return (agg_dict, input_items, output_items)
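# Worked example (item names are made up for illustration): given
#   meta_list = [
#       {'input': {'source': 'temp'}, 'output': {'name': 'temp_mean'}, 'functionName': 'mean'},
#       {'input': {'source': 'temp'}, 'output': {'name': 'temp_std'}, 'functionName': 'std'},
#   ]
# the method returns
#   agg_dict     == OrderedDict([('temp', ['mean', 'std'])])
#   input_items  == {'temp'}
#   output_items == ['temp_mean', 'temp_std']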
def build_arg_metadata(self, obj):
"""
Examine the metadata provided by build_ui() to understand more about
the arguments to a function.
Place the values of inputs and outputs into 2 dicts
Return these two dicts in a tuple along with an output_meta dict
that contains argument values and types
Build the _input_set and _output list. These describe the set of
data items required as inputs to a function and the list of data
items produced by the function.
"""
name = obj.__class__.__name__
try:
(inputs, outputs) = obj.build_ui()
except (AttributeError, NotImplementedError) as e:
try:
fn_metadata = obj.metadata()
inputs = fn_metadata.get('input', None)
outputs = fn_metadata.get('output', None)
except (AttributeError, KeyError) as ex:
msg = ('Can\'t get metadata for function %s. Implement the'
' build_ui() method for this function. %s' % (name, str(e)))
raise NotImplementedError(msg)
input_args = {} # this is not used. Included only to maintain compatibility of return signature
output_args = {} # this is not used. Included only to maintain compatibility of return signature
output_meta = {} # this is not used. Included only to maintain compatibility of return signature
output_list = []
# There are two ways to gather inputs to a function.
# 1) from the arguments of the function
# 2) from an explicit list of items returned by the get_input_items
# method
try:
input_set = set(obj.get_input_items())
except AttributeError:
input_set = set()
else:
if len(input_set) > 0:
logger.debug(('Function %s has explicit required input items '
' delivered by the get_input_items() method: %s'), name, input_set)
if not isinstance(inputs, list):
raise TypeError(('Function registration metadata must be defined'
' using a list of objects derived from iotfunctions'
' BaseUIControl. Check metadata for %s'
' %s ' % (name, inputs)))
if not isinstance(outputs, list):
raise TypeError(('Function registration metadata must be defined'
' using a list of objects derived from iotfunctions'
' BaseUIControl. Check metadata for %s'
' %s ' % (name, outputs)))
args = []
args.extend(inputs)
args.extend(outputs)
for a in args:
try:
# get arg name and type from UI object
type_ = a.type_
arg = a.name
except AttributeError as e:
try:
# get arg name and type from legacy dict
type_ = a.get('type', None)
arg = a.get('name', None)
except AttributeError:
type_ = None
arg = None
if type_ is None or arg is None:
msg = ('Error while getting metadata from function. The inputs'
' and outputs of the function are not described correctly'
' using UIcontrols with a type_ %s and name %s' % (type_, arg))
raise TypeError(msg)
arg_value = getattr(obj, arg)
out_arg = None
out_arg_value = None
if type_ == 'DATA_ITEM':
# the argument is an input that contains a data item or
# list of data items
if isinstance(arg_value, list):
input_set |= set(arg_value)
else:
input_set.add(arg_value)
logger.debug('Using input items %s for %s', arg_value, arg)
elif type_ == 'OUTPUT_DATA_ITEM':
# the arg is an output item or list of them
out_arg = arg
out_arg_value = arg_value
# some inputs implicitly describe outputs
try:
out_arg = a.output_item
except AttributeError:
pass # no need to check legacy dict for this property as it was not supported in the legacy dict
else:
if out_arg is not None:
out_arg_value = getattr(obj, out_arg)
# process output args
if out_arg is not None:
if isinstance(out_arg_value, list):
output_list.extend(out_arg_value)
else:
output_list.append(out_arg_value)
logger.debug('Using output items %s for %s', out_arg_value, out_arg)
# output_meta is present in the AS metadata structure, but not
# currently produced for local functions
return (input_args, output_args, output_meta, input_set, output_list)
def build_ui_constants(self):
"""
Build attributes for each ui constant declared with the entity type
"""
if self.ui_constants is None:
logger.debug('No constants declared in entity definition')
self.ui_constants = []
params = {}
for c in self.ui_constants:
try:
params[c.name] = c.default
except AttributeError:
logger.warning(('Cannot set value of parameter %s as it does'
' not have a default value'), c.name)
self.set_params(**params)
def build_flat_stage_list(self):
"""
Build a flat list of all function objects defined for entity type
"""
stages = []
for stage in self._functions:
try:
is_system = stage.is_system_function
except AttributeError:
is_system = False
logger.warning(('Function %s has no is_system_function property.'
' This means it was not inherited from '
' an iotfunctions base class. AS authors are'
' strongly encouraged to always inherit '
' from iotfunctions base classes'), stage.__class__.__name__)
if not is_system:
stages.append(stage)
return stages
def build_granularities(self, grain_meta, freq_lookup):
"""
Convert AS granularity metadata to granularity objects.
"""
out = {}
for g in grain_meta:
grouper = []
freq = None
entity_id = None
if g['entityFirst']:
grouper.append(pd.Grouper(key=self._entity_id))
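# Standalone sketch (not part of the class above; the column names and toy
# frame are assumptions): once a list of pandas Groupers has been assembled,
# e.g. one keyed on the entity id and one on the event timestamp, it is passed
# to DataFrame.groupby to aggregate the data to the target time grain.
import pandas as pd

df = pd.DataFrame({'deviceid': ['a', 'a', 'b'],
                   'evt_timestamp': pd.to_datetime(['2020-01-01 00:05',
                                                    '2020-01-01 00:35',
                                                    '2020-01-01 00:10']),
                   'temperature': [20.0, 22.0, 19.5]})
grouper = [pd.Grouper(key='deviceid'),
           pd.Grouper(key='evt_timestamp', freq='1H')]
hourly_mean = df.groupby(grouper)['temperature'].mean()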
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_different = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_different,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_dist_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_PREV_TO_NEXT not in move_df
move_df.generate_dist_features()
assert_frame_equal(move_df, expected)
def test_generate_time_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
1.0,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1.0,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'time_to_prev',
'time_to_next',
'time_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TIME_PREV_TO_NEXT not in move_df
move_df.generate_time_features()
assert_frame_equal(move_df, expected)
def test_generate_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'speed_to_prev',
'speed_to_next',
'speed_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert SPEED_PREV_TO_NEXT not in move_df
move_df.generate_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_move_and_stop_by_radius():
move_df = _default_move_df()
new_move_df = move_df.generate_move_and_stop_by_radius(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
'nan',
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank
from pylab import plot,subplot,axis,stem,show,figure
import numpy
import pandas
import math
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from sklearn import cross_validation
from sklearn.linear_model import LinearRegression
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
# to pca 3 to visual
def pca3perform():
data=pandas.read_table("data-encode.txt",sep=' ')
print("success read test.csv file")
#data=data.reset_index().values
#data=data.as_matrix()
y=data.iloc[0:,0]
y=y.as_matrix()
x=data.iloc[0:,1:]
x=x.as_matrix()
pca=PCA(n_components=3, copy=False)
temp=pca.fit(x)
temp=pca.transform(x)
print(temp,type(temp))
x=temp
temp=pandas.DataFrame(temp)
perform(pca,x,y)
def perform(pca,X,y):
fig = plt.figure(1, figsize=(50, 50))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[[0]])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[[1]])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
def pcaana(A):
# computing eigenvalues and eigenvectors of covariance matrix
M = (A-mean(A.T,axis=1)).T # subtract the mean (along columns)
[latent,coeff] = linalg.eig(cov(M)) # attention:not always sorted
score = dot(coeff.T,M) # projection of the data in the new space
print(coeff, score, latent)
figure("init figure")
#subplot(121)
# every eigenvector describe the direction
# of a principal component.
m = mean(A,axis=1)
plot([0, -coeff[0,0]*2]+m[0], [0, -coeff[0,1]*2]+m[1],'--k')
plot([0, coeff[1,0]*2]+m[0], [0, coeff[1,1]*2]+m[1],'--k')
plot(A[0,:],A[1,:],'ob') # the data
axis('equal')
subplot(122)
# new data
plot(score[0,:],score[1,:],'*g')
axis('equal')
show()
return coeff,score,latent
def en(X=[[0,0]]):
X=X-numpy.mean(X,axis=0)
[u,s,v]=numpy.linalg.svd(X)
v=v.transpose()
#v=v[:,:numcomp]
return numpy.dot(X,v)
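# Quick standalone check (illustrative only; _demo_en_vs_pca and its toy array
# are additions, not part of the original script): the SVD projection returned
# by en() matches sklearn PCA scores up to the sign of each component.
def _demo_en_vs_pca():
    X = numpy.array([[2.5, 2.4], [0.5, 0.7], [2.2, 2.9],
                     [1.9, 2.2], [3.1, 3.0], [2.3, 2.7]])
    scores_svd = en(X)
    scores_pca = PCA(n_components=2).fit_transform(X)
    # each principal axis may be flipped, so compare absolute values
    assert numpy.allclose(numpy.abs(scores_svd), numpy.abs(scores_pca))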
def sigmod(x):
return int(round(1.0/(1+math.exp(-x)),0))
if __name__=="__main__":
pca3perform()
exit()
A = array([ [2.4,0.7,2.9,2.2,3.0,2.7,1.6,1.1,1.6,0.9],[2.4,1.7,2.9,2.2,3.0,2.7,2.6,1.1,1.6,0.9],
[2.5,0.5,2.2,1.9,3.1,2.3,2,1,1.5,1.1] ])
data = pandas.read_csv("multi_phenos.txt", sep=' ', header=None)
import numpy as np
from sas7bdat import SAS7BDAT
import glob
import pandas as pd
from sklearn import preprocessing
from sas7bdat import SAS7BDAT
import glob
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import utils, model_selection, metrics, linear_model, neighbors, ensemble
def convertAllCHSData(year = [], onlySubjectsWBiomarkers = 0):
if onlySubjectsWBiomarkers:
print('Only obtaining data for subjects/households with biomarker data.')
dataDirs = glob.glob('./data/Master*')
for dir in dataDirs:
SASfiles = glob.glob(dir + '/*.sas7bdat')
for SASfile in SASfiles:
convertSASfile(SASfile, year, onlySubjectsWBiomarkers)
def convertSASfile(inputFullPath, year = [], onlySubjectsWBiomarkers = 0):
print('Converting ' + inputFullPath)
df = SAS2DataFrame(inputFullPath, year = year)
outputName = inputFullPath.split('/')[-1].split('.')[0]
outputDir = '/'.join(inputFullPath.split('/')[0:-2])
if year:
outputFullPath = outputDir + '/' + outputName
outputFullPath = outputFullPath + '_' + str(year) + 'only' + '.csv'
else:
outputFullPath = outputDir + '/' + outputName + '.csv'
if onlySubjectsWBiomarkers:
subjectsWithBiomarkers = pd.read_csv('./data/subjectsWithBiomarkers.csv')
tmp = set(df.columns)
identifyingFields = list(tmp.intersection(set(subjectsWithBiomarkers.columns)))
if not identifyingFields:
print('No identifying fields found.')
return
elif identifyingFields.count('idind'):
selFactor = 'idind'
selidinds = list(set(df[selFactor]).intersection(set(subjectsWithBiomarkers[selFactor])))
selIdxs = [a in selidinds for a in df[selFactor]]
df = df[selIdxs]
elif identifyingFields.count('hhid'):
selFactor = 'hhid'
selidinds = list(set(df[selFactor]).intersection(set(subjectsWithBiomarkers[selFactor])))
selIdxs = [a in selidinds for a in df[selFactor]]
df = df[selIdxs]
elif identifyingFields.count('commid'):
selFactor = 'commid'
selidinds = list(set(df[selFactor]).intersection(set(subjectsWithBiomarkers[selFactor])))
selIdxs = [a in selidinds for a in df[selFactor]]
df = df[selIdxs]
print(str(df.shape[0]) + ' valid rows')
df.to_csv(outputFullPath)
return
def SAS2DataFrame(inputFullPath, year = []):
with SAS7BDAT(inputFullPath, skip_header=False) as reader:
df = reader.to_data_frame()
df.columns = [col.lower() for col in df.columns]
if year and 'wave' in df.columns:
df = df[df['wave'] == year]
return df
def getSurveyData():
''' Gets relevant survey data for dHealth project
i.e. survey data for subjects that have biomarker data
'''
surveyPath = './data/Master_ID_201908/surveys_pub_12.sas7bdat'
surveyData = SAS2DataFrame(surveyPath)
surveyData = surveyData[(surveyData['biomaker'] == 1) & (surveyData['wave'] == 2009)]
return surveyData
def getBiomarkerData():
surveyData = getSurveyData()
biomarkerPath = './data/Master_Biomarker_2009/biomarker_09.sas7bdat'
biomarkerData = SAS2DataFrame(biomarkerPath)
ids1 = set(biomarkerData.idind)
ids2 = set(surveyData.idind)
excludeIds = list(ids1.difference(ids2))
for id in excludeIds:
tmp = list(biomarkerData.idind)
idx = tmp.index(id)
biomarkerData = biomarkerData.drop(idx)
return biomarkerData
def createSubjectsWithBiomarkersCSV():
surveyData = getSurveyData()
surveyData = surveyData.iloc[:,[0,1,5,3]]
surveyData.columns = ['idind', 'hhid', 'commid', 'Age']
surveyData.to_csv('./data/subjectsWithBiomarkers.csv')
# createSubjectsWithBiomarkersCSV()
featureMap = pd.read_csv('featureTableMap.csv')
subjects = pd.read_csv('./data/subjectsWithBiomarkers.csv',usecols = ['idind','Age']) # Could add others too'hhid','commid'
def createGenderCSV():
print('Extracting gender data...')
subjects = pd.read_csv('./data/subjectsWithBiomarkers.csv',usecols = ['idind','hhid','commid'])
subjects = subjects.astype({'idind': 'int',
'hhid': 'int',
'commid': 'int'})
def getGender(subjectIdx, idind_1, idind_2, sex_1, sex_2):
gender = np.nan
if subjects.idind[subjectIdx] in idind_1:
idx = idind_1.index(subjects.idind[subjectIdx])
gender = int(sex_1[idx])
elif subjects.idind[subjectIdx] in idind_2:
idx = idind_2.index(subjects.idind[subjectIdx])
gender = int(sex_2[idx])
else:
gender = np.nan
if gender == 1:
gender = int(1)
elif gender == 2:
gender = 0
if subjectIdx % 500 == 0:
print(str(100*subjectIdx/9548) + '% complete')
return gender
relations = pd.read_csv('./data/relationmast_pub_00_2009only.csv')
idind_1 = list(relations.idind_1)
idind_2 = list(relations.idind_2)
sex_1 = list(relations.sex_1)
sex_2 = list(relations.sex_2)
gender = [getGender(i, idind_1, idind_2, sex_1, sex_2) for i in range(len(subjects))]
d = {'idind': subjects.idind, 'Sex': gender}
df = pd.DataFrame(data=d)
df.to_csv('./data/gender.csv')
def createSleep_ScreenTimeCSV():
sleep_screenTime = pd.read_csv('./data/pact_12_2009only.csv', usecols=['idind', 'u324', 'u339', 'u340_mn', 'u341_mn', 'u508', 'u509_mn', 'u510_mn', 'u345', 'u346_mn', 'u347_mn'])
"""
This script plots the ARI and Runtime values obtained from graspyclust_experiments.py, autogmm_experiments.py, and mclust_experiments.r
It saves the figures as subset_abc.png and subset_def.png
"""
#%%
import numpy as np
from scipy.stats import mode
from scipy.stats import wilcoxon
from sklearn.metrics import adjusted_rand_score
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
#%%
print("Reading data...")
path = "/results/"
# read the data
mclust_s = pd.read_csv(path + "mclust_synthetic.csv")
mclust_s = mclust_s.loc[:, ["ARI", "Time"]]
mclust_s["Dataset"] = mclust_s.shape[0] * ["Synthetic"]
mclust_s["Algorithm"] = mclust_s.shape[0] * ["mclust"]
mclust_bc = pd.read_csv(path + "mclust_bc.csv")
mclust_bc = mclust_bc.loc[:, ["ARI", "Time"]]
mclust_bc["Dataset"] = mclust_bc.shape[0] * ["Breast Cancer"]
mclust_bc["Algorithm"] = mclust_bc.shape[0] * ["mclust"]
mclust_dro = pd.read_csv(path + "mclust_drosophila.csv")
mclust_dro = mclust_dro.loc[:, ["ARI", "Time"]]
mclust_dro["Dataset"] = mclust_dro.shape[0] * ["Drosophila"]
mclust_dro["Algorithm"] = mclust_dro.shape[0] * ["mclust"]
autogmm_s = pd.read_csv(path + "autogmm_synthetic.csv")
autogmm_s = autogmm_s.loc[:, ["ARI", "Time"]]
autogmm_s["Dataset"] = autogmm_s.shape[0] * ["Synthetic"]
autogmm_s["Algorithm"] = autogmm_s.shape[0] * ["AutoGMM"]
autogmm_bc = pd.read_csv(path + "autogmm_bc.csv")
autogmm_bc = autogmm_bc.loc[:, ["ARI", "Time"]]
autogmm_bc["Dataset"] = autogmm_bc.shape[0] * ["Breast Cancer"]
autogmm_bc["Algorithm"] = autogmm_bc.shape[0] * ["AutoGMM"]
autogmm_dro = pd.read_csv(path + "autogmm_drosophila.csv")
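# A rough, assumed sketch (not the authors' code) of how such per-algorithm
# frames are typically combined and their ARI distributions plotted with
# seaborn, along these lines:
#
#   combined = pd.concat([mclust_s, mclust_bc, mclust_dro,
#                         autogmm_s, autogmm_bc, autogmm_dro], ignore_index=True)
#   sns.boxplot(data=combined, x='Dataset', y='ARI', hue='Algorithm')
#   plt.savefig('subset_abc.png')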
'''
Contains classes of models that can be found in `Vo and Zhang 2015 paper \
<https://www.ijcai.org/Proceedings/15/Papers/194.pdf>`_.
Classes:
1. :py:class:`bella.models.target.TargetInd` - Target indepdent model
'''
from collections import defaultdict
import copy
import time
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from bella.tokenisers import ark_twokenize
from bella.neural_pooling import matrix_max, matrix_min, matrix_avg,\
matrix_median, matrix_prod, matrix_std
from bella.notebook_helper import get_json_data, write_json_data
from bella.scikit_features.context import Context
from bella.scikit_features.tokeniser import ContextTokeniser
from bella.scikit_features.word_vector import ContextWordVectors
from bella.scikit_features.lexicon_filter import LexiconFilter
from bella.scikit_features.neural_pooling import NeuralPooling
from bella.scikit_features.join_context_vectors import JoinContextVectors
class TargetInd():
def __init__(self):
self.model = None
self.pipeline = Pipeline([
('contexts', Context('full')),
('tokens', ContextTokeniser(ark_twokenize, True)),
('word_vectors', ContextWordVectors()),
('pool_funcs', FeatureUnion([
('max_pipe', Pipeline([
('max', NeuralPooling(matrix_max)),
('join', JoinContextVectors(matrix_median))
])),
('min_pipe', Pipeline([
('min', NeuralPooling(matrix_min)),
('join', JoinContextVectors(matrix_median))
])),
('avg_pipe', Pipeline([
('avg', NeuralPooling(matrix_avg)),
('join', JoinContextVectors(matrix_median))
])),
('prod_pipe', Pipeline([
('min', NeuralPooling(matrix_prod)),
('join', JoinContextVectors(matrix_median))
])),
('std_pipe', Pipeline([
('min', NeuralPooling(matrix_std)),
('join', JoinContextVectors(matrix_median))
]))
])),
('scale', MinMaxScaler()),
('svm', LinearSVC(C=0.01))
])
def save_model(self, model_file, verbose=0):
if self.model is None:
raise ValueError('Model is not fitted, please fit the model '\
'using the fit function')
time_taken = time.time()
joblib.dump(self.model, model_file)
if verbose == 1:
time_taken = round(time.time() - time_taken, 2)
print('Model saved to {}. Save time {}'\
.format(model_file, time_taken))
def load_model(self, model_file, verbose=0):
if verbose == 1:
time_taken = time.time()
print('Loading model from {}'.format(model_file))
self.model = joblib.load(model_file)
time_taken = round(time.time() - time_taken, 2)
print('Model successfully loaded. Load time {}'.format(time_taken))
else:
self.model = joblib.load(model_file)
def find_best_c(self, train_data, train_y, grid_params, save_file=None,
dataset_name=None, re_write=False, **kwargs):
'''
:param train_data: Training instances to grid search over
:param train_y: Training True values to grid search over
:param grid_params: parameters for the model, all parameters can be \
found from the `get_cv_params` function. The C value parameter will be \
ignored if given.
:param kwargs: keywords arguments to give as arguments to the scikit learn \
`GridSearchCV <http://scikit-learn.org/stable/modules/generated/sklearn.\
model_selection.GridSearchCV.html>`_ object e.g. cv=10.
:type train_data: array/list
:type train_y: array/list
:type grid_params: dict
:type kwargs: dict
:returns: Searches through two sets of C values: a coarse grain set \
then a fine grain set. Grid searches over these values to return the best \
C value without doing a full exhaustive search. This method is inspired by \
`Hsu et al. SVM guide \
<https://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf>`_
:rtype: float
'''
def best_c_value(c_scores):
best = 0
best_c = 0
for c_value, acc in c_scores.items():
if acc > best:
best_c = c_value
best = acc
return float(best_c)
def grid_res_to_dict(grid_results):
c_score = {}
c_scores = grid_results[['param_svm__C', 'mean_test_score']]
for i in c_scores.index:
c_result = c_scores.loc[i]
c_value = c_result['param_svm__C']
test_score = c_result['mean_test_score']
c_score[c_value] = test_score
return c_score
save_file_given = save_file is not None and dataset_name is not None
# If C value given in grid_params remove it
if 'C' in grid_params:
del grid_params['C']
if save_file_given and not re_write:
c_scores = get_json_data(save_file, dataset_name)
if c_scores != {}:
return best_c_value(c_scores), c_scores
# Coarse grain search
coarse_range = []
start = 0.00001
stop = 10
while True:
coarse_range.append(start)
start *= 10
if start > stop:
break
grid_params['C'] = coarse_range
cv_params = self.get_cv_params(**grid_params)
c_scores = {}
coarse_results = self.grid_search(train_data, train_y,
params=cv_params, **kwargs)
c_scores = {**grid_res_to_dict(coarse_results), **c_scores}
best_coarse_c = self.model.best_params_['svm__C']
# Fine grain search
fine_range = [(best_coarse_c / 10) * 3.5,
(best_coarse_c / 10) * 7, best_coarse_c,
best_coarse_c * 3.5, best_coarse_c * 7]
grid_params['C'] = fine_range
cv_params = self.get_cv_params(**grid_params)
fine_results = self.grid_search(train_data, train_y,
params=cv_params, **kwargs)
c_scores = {**grid_res_to_dict(fine_results), **c_scores}
best_c = self.model.best_params_['svm__C']
c_2_string = self.c_param_name(c_scores.keys())
c_scores = {c_2_string[c] : value for c, value in c_scores.items()}
if save_file_given:
write_json_data(save_file, dataset_name, c_scores)
return best_c, c_scores
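# Standalone sketch of the coarse-then-fine C search implemented above (the
# helper name and the example best coarse value are assumptions, not part of
# the class): a coarse sweep over powers of ten locates a region, then a finer
# sweep probes multiples of the best coarse value.
def _sketch_c_ranges(best_coarse_c=0.01):
    coarse, c = [], 0.00001
    while c <= 10:
        coarse.append(c)
        c *= 10
    fine = [(best_coarse_c / 10) * 3.5, (best_coarse_c / 10) * 7,
            best_coarse_c, best_coarse_c * 3.5, best_coarse_c * 7]
    return coarse, fine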
def c_param_name(self, c_values):
'''
:param c_values: A list of floats representing C values to be mapped to \
String values
:type c_values: list
:returns: A dict of float to String values where the float represents \
the true C value and the String is its String representation.
:rtype: dict
'''
return {c_value : str(c_value) for c_value in c_values}
def senti_lexicon_param_name(self, senti_lexicons):
'''
:param senti_lexicons: A list of Lexicon instances
:type senti_lexicons: list
:returns: A dict mapping Lexicon instance with the String name of the \
lexicon
:rtype: dict
'''
return {senti_lexicon : senti_lexicon.name \
for senti_lexicon in senti_lexicons}
def word_vector_param_name(self, all_word_vectors):
'''
:param all_word_vectors: A list of lists of WordVector instances
:type all_word_vectors: list
:returns: A dict mapping tuples of WordVector instances to their \
String representation, found using their name attribute.
:rtype: dict
'''
word_vector_2_name = {}
for word_vectors in all_word_vectors:
word_vectors_tuple = tuple(word_vectors)
word_vectors_name = [word_vector.name for word_vector in word_vectors]
word_vectors_name = ' '.join(word_vectors_name)
word_vector_2_name[word_vectors_tuple] = word_vectors_name
return word_vector_2_name
def tokeniser_param_name(self, tokenisers):
'''
:param tokenisers: A list of tokeniser functions
:type tokenisers: list
:returns: A dict of tokeniser function to the name of the tokeniser \
function as a String
:rtype: dict
'''
return {tokeniser : tokeniser.__name__ for tokeniser in tokenisers}
def param_name_function(self, param_name):
'''
:param param_name: Name of the only parameter being searched for in \
the grid search
:type param_name: String
:returns: A function that can map the parameter values of the parameter \
name to meaningful String values
:rtype: function
'''
if param_name == 'word_vectors':
return self.word_vector_param_name
elif param_name == 'tokenisers':
return self.tokeniser_param_name
elif param_name == 'C':
return self.c_param_name
elif param_name == 'senti_lexicons':
return self.senti_lexicon_param_name
elif param_name == 'parsers':
return self.tokeniser_param_name
else:
            raise ValueError('param_name has to be one of the following values: '
                             'word_vectors, tokenisers, C, senti_lexicons or '
                             'parsers, not {}'.format(param_name))
@staticmethod
def _get_word_vector_names():
'''
        Method to be overridden by subclasses as each pipeline will be different
        and will have different parameter names for where the word vectors are
        set.
        :returns: A list of parameter names where the word vectors are set in \
        the pipeline.
:rtype: list
'''
return ['word_vectors__vectors']
@staticmethod
def _get_tokeniser_names():
'''
        Method to be overridden by subclasses as each pipeline will be different
        and will have different parameter names for where the tokenisers are
        set.
        :returns: A list of parameter names where the tokenisers are set in \
        the pipeline.
:rtype: list
'''
return ['tokens']
@staticmethod
def _add_to_params_dict(params_dict, keys, value):
'''
Given a dictionary it adds the value to each key in the list of keys
into the dictionary. Returns the updated dictionary.
:param params_dict: Dictionary to be updated
:param keys: list of keys
:param value: value to be added to each key in the list of keys.
:type params_dict: dict
:type keys: list
:type value: Python object
:returns: The dictionary updated
:rtype: dict
'''
if not isinstance(keys, list):
raise ValueError('The keys parameter has to be of type list and not {}'\
.format(type(keys)))
for key in keys:
params_dict[key] = value
return params_dict
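    # Illustrative example (not in the original source; the dotted pipeline
    # names are hypothetical):
    #     _add_to_params_dict({}, ['word_vectors__vectors', 'tokens__vectors'], wv)
    # returns {'word_vectors__vectors': wv, 'tokens__vectors': wv}.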
def get_params(self, word_vector, tokeniser=None, lower=None, C=None,
random_state=None, scale=True):
'''
        This method is to be overridden when more values than those listed in the
        attributes are required for the model, e.g. a lexicon.
        If values are not required, e.g. lower, then the model has a default value
        for it which will be used when the user does not set a value here.
        :param word_vector: A list of `bella.word_vectors.WordVectors` \
        instances e.g. [WordVectors(), AnotherWordVector()]
        :param tokeniser: A tokeniser method from `bella.tokenisers` \
        or a method that conforms to the same output as `bella.tokenisers`
        :param lower: A bool which indicates whether to lower case the input words.
        :param C: A float which indicates the C value of the SVM classifier.
        :param random_state: An int which defines the seed of the random number \
        generator used to shuffle the data. Used to ensure reproducibility.
:param scale: bool indicating to use scaling or not. Default is to scale.
:type word_vector: list
:type tokeniser: function
:type lower: bool
:type C: float
:type random_state: int
:type scale: bool Default True
:return: A parameter dict which indicates the parameters the model should \
use. The return of this function can be used as the params attribute in \
the `fit` method.
:rtype: dict
'''
params_dict = {}
params_dict = self._add_to_params_dict(params_dict,
self._get_word_vector_names(),
word_vector)
if tokeniser is not None:
tokenisers_names = [param_name + '__tokeniser'
for param_name in self._get_tokeniser_names()]
params_dict = self._add_to_params_dict(params_dict, tokenisers_names,
tokeniser)
if lower is not None:
lower_names = [param_name + '__lower'
for param_name in self._get_tokeniser_names()]
params_dict = self._add_to_params_dict(params_dict, lower_names, lower)
if C is not None:
params_dict = self._add_to_params_dict(params_dict, ['svm__C'], C)
if random_state is not None:
params_dict = self._add_to_params_dict(params_dict,
['svm__random_state'], random_state)
if scale:
params_dict = self._add_to_params_dict(params_dict, ['scale'],
MinMaxScaler())
else:
params_dict = self._add_to_params_dict(params_dict, ['scale'], None)
return params_dict
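    # Illustrative usage sketch (not in the original source; glove_vectors and
    # whitespace_tokeniser are hypothetical stand-ins for real WordVectors and
    # tokeniser objects):
    #     params = model.get_params(word_vector=[glove_vectors],
    #                               tokeniser=whitespace_tokeniser,
    #                               lower=True, C=0.01, random_state=42)
    #     model.fit(train_data, train_y, params=params)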
@staticmethod
def _add_to_params(params_list, to_add, to_add_names):
'''
        Used to add parameters that are stated multiple times in the same
        pipeline and must share the same value. To add them, the current
        parameter list is copied N times, where N is the length of the
        to_add list. Returns the updated parameter list.
        :param params_list: A list of dicts where each dict contains parameters and \
        corresponding values that are to be searched for. All dicts are part of \
        the search space.
:param to_add: List of values that are to be added to the search space.
:param to_add_names: List of names that are associated to the values.
:type params_list: list
:type to_add: list
:type to_add_names: list
:returns: The updated params_list
:rtype: list
'''
num_params = len(params_list)
num_to_add = len(to_add)
new_param_list = []
# Catch the case that params_list was originally empty
if num_params == 0:
for _ in range(num_to_add):
new_param_list.append([defaultdict(list)])
else:
for _ in range(num_to_add):
new_param_list.append(copy.deepcopy(params_list))
for index, param in enumerate(to_add):
for param_name in to_add_names:
for sub_list in new_param_list[index]:
sub_list[param_name].append(param)
params_list = [param_dict for sub_list in new_param_list
for param_dict in sub_list]
return params_list
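    # Illustrative example (not in the original source; wv_a and wv_b are
    # hypothetical WordVectors instances): starting from an empty params_list,
    #     _add_to_params([], [[wv_a], [wv_b]], ['word_vectors__vectors'])
    # yields two separate search-space dicts:
    #     [{'word_vectors__vectors': [[wv_a]]}, {'word_vectors__vectors': [[wv_b]]}]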
@staticmethod
def _add_to_all_params(params_list, param_name, param_value):
'''
Used to add param_name and its values to each dictionary of parameters
in the params_list. Returns the updated params_list.
:param params_list: A list of dicts where each dict contains parameters and \
        corresponding values that are to be searched for. All dicts are part of \
        the search space.
        :param param_name: The name associated to the parameter value to be added \
        to the params_list.
        :param param_value: The list of values associated to the param_name that are \
        added to the params_list linked to the associated name.
        :type params_list: list
:type param_name: String
:type param_value: list
:returns: The updated params_list
:rtype: list
'''
for param_dict in params_list:
param_dict[param_name] = param_value
return params_list
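    # Illustrative example (not in the original source): given two existing
    # search-space dicts,
    #     _add_to_all_params([{'a': 1}, {'b': 2}], 'svm__C', [0.01, 0.1])
    # returns [{'a': 1, 'svm__C': [0.01, 0.1]}, {'b': 2, 'svm__C': [0.01, 0.1]}].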
def get_cv_params(self, word_vectors, tokenisers=None, lowers=None, C=None,
scale=None, random_state=None):
'''
        Each attribute has to be a list which contains parameters that are to be
        tuned.
        This method is to be overridden when more values than those listed in the
        attributes are required for the model, e.g. a lexicon.
        :param word_vectors: A list of a list of `bella.word_vectors.WordVectors` \
        instances e.g. [[WordVectors()], [WordVectors(), AnotherWordVector()]]
        :param tokenisers: A list of tokeniser methods from `bella.tokenisers` \
        or a list of methods that conform to the same output as `bella.tokenisers`
        :param lowers: A list of bool values which indicate whether to lower case \
        the input words.
        :param C: A list of floats which indicate the C value on the SVM classifier.
        :param random_state: An int which defines the seed of the random number \
        generator used to shuffle the data. Used to ensure reproducibility.
        :param scale: A list of up to two bool values indicating whether to scale \
        the data (True maps to MinMaxScaler(), False to no scaling). Leave as None \
        to keep the pipeline's default scaling.
        :type word_vectors: list
        :type tokenisers: list
        :type lowers: list
        :type C: list
        :type random_state: int
        :type scale: list
:return: A list of dicts where each dict represents a different \
parameter space to search. Used as the params attribute to grid_search \
function.
:rtype: list
'''
params_list = []
params_list = self._add_to_params(params_list, word_vectors,
self._get_word_vector_names())
if tokenisers is not None:
tokenisers_names = [param_name + '__tokeniser'
for param_name in self._get_tokeniser_names()]
params_list = self._add_to_params(params_list, tokenisers,
tokenisers_names)
if lowers is not None:
lower_names = [param_name + '__lower'
for param_name in self._get_tokeniser_names()]
params_list = self._add_to_params(params_list, lowers, lower_names)
if C is not None:
params_list = self._add_to_all_params(params_list, 'svm__C', C)
if random_state is not None:
if not isinstance(random_state, int):
raise TypeError('random_state should be of type int and not {}'\
.format(type(random_state)))
random_state = [random_state]
params_list = self._add_to_all_params(params_list, 'svm__random_state',
random_state)
if scale is not None:
scale_params = []
if len(scale) > 2:
raise ValueError('Scale has to be a list, that can only '\
'contain two values False to not scale and '\
'True to scale your list contains more than '\
'two values {}'.format(scale))
for value in scale:
if value:
scale_params.append(MinMaxScaler())
else:
scale_params.append(None)
            params_list = self._add_to_all_params(params_list, 'scale',
                                                  scale_params)
return params_list
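    # Illustrative usage sketch (not in the original source; the word-vector and
    # tokeniser names are hypothetical):
    #     cv_params = model.get_cv_params(word_vectors=[[glove_vectors],
    #                                                   [glove_vectors, sswe_vectors]],
    #                                     tokenisers=[whitespace_tokeniser],
    #                                     C=[0.001, 0.01, 0.1])
    #     results = model.grid_search(train_data, train_y,
    #                                 params=cv_params, cv=5, n_jobs=-1)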
def fit(self, train_data, train_y, params):
temp_pipeline = copy.deepcopy(self.pipeline)
temp_pipeline.set_params(**params)
temp_pipeline.fit(train_data, train_y)
self.model = temp_pipeline
def grid_search(self, train_data, train_y, params, **kwargs):
grid_search = GridSearchCV(self.pipeline, param_grid=params, **kwargs)
self.model = grid_search.fit(train_data, train_y)
        cross_val_results = pd.DataFrame(grid_search.cv_results_)
        return cross_val_results
# -*- coding: utf-8 -*-
import scrapy # needed to scrape
import xlrd # used to easily import xlsx file
import json
import re
import pandas as pd
import numpy as np
from openpyxl import load_workbook
import datetime
#from datetime import timedelta
class ScrapeTokenData(scrapy.Spider):
name = 'CreateTokenListbot' # Name of Script
start_urls = ['https://eidoo.io/erc20-tokens-list/']
print("``````````````````````````````````````````````````````````````````````````````")
################################################################################################
################################################################################################
"""
Scrape Daily Ercot data and Append to "MASTER-Ercot" file
"""
def parse(self, response):
self.logger.info('A response has arrived from %s', response.url)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        ### Scrape token names and market caps from the page
values = response.css("#col > h4:nth-child(2)").extract()
tokens = [value for idx, value in enumerate(values) if idx % 4 == 0]
marketCap = [value for idx, value in enumerate(values) if idx % 4 == 2]
ranking = list(range(1,len(tokens) + 1))
# Clean up
tokens = [item.replace("<h4>","") for item in tokens]
tokens = [item.replace("</h4>","") for item in tokens]
marketCap = [item.replace("<h4>","").strip() for item in marketCap]
marketCap = [item.replace("</h4>","").strip() for item in marketCap]
temp = [item.split(" (") for item in tokens]
        temp = pd.DataFrame(data=temp, index=ranking)
import os
import warnings
from collections import OrderedDict
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.preprocessing import label_binarize
from evalml.exceptions import NoPositiveLabelException
from evalml.model_understanding.graphs import (
binary_objective_vs_threshold,
calculate_permutation_importance,
confusion_matrix,
decision_tree_data_from_estimator,
decision_tree_data_from_pipeline,
get_linear_coefficients,
get_prediction_vs_actual_data,
get_prediction_vs_actual_over_time_data,
graph_binary_objective_vs_threshold,
graph_confusion_matrix,
graph_partial_dependence,
graph_permutation_importance,
graph_precision_recall_curve,
graph_prediction_vs_actual,
graph_prediction_vs_actual_over_time,
graph_roc_curve,
graph_t_sne,
normalize_confusion_matrix,
precision_recall_curve,
roc_curve,
t_sne,
visualize_decision_tree,
)
from evalml.objectives import CostBenefitMatrix
from evalml.pipelines import (
BinaryClassificationPipeline,
DecisionTreeRegressor,
ElasticNetRegressor,
LinearRegressor,
MulticlassClassificationPipeline,
RegressionPipeline,
TimeSeriesRegressionPipeline,
)
from evalml.problem_types import ProblemTypes
from evalml.utils import get_random_state, infer_feature_types
@pytest.fixture
def test_pipeline():
class TestPipeline(BinaryClassificationPipeline):
component_graph = [
"Simple Imputer",
"One Hot Encoder",
"Standard Scaler",
"Logistic Regression Classifier",
]
def __init__(self, parameters):
super().__init__(self.component_graph, parameters=parameters)
return TestPipeline(parameters={"Logistic Regression Classifier": {"n_jobs": 1}})
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_confusion_matrix(data_type, make_data_type):
y_true = np.array([2, 0, 2, 2, 0, 1, 1, 0, 2])
y_predicted = np.array([0, 0, 2, 2, 0, 2, 1, 1, 1])
y_true = make_data_type(data_type, y_true)
y_predicted = make_data_type(data_type, y_predicted)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method=None)
conf_mat_expected = np.array([[2, 1, 0], [0, 1, 1], [1, 1, 2]])
assert np.array_equal(conf_mat_expected, conf_mat.to_numpy())
assert isinstance(conf_mat, pd.DataFrame)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method="all")
conf_mat_expected = conf_mat_expected / 9.0
assert np.array_equal(conf_mat_expected, conf_mat.to_numpy())
assert isinstance(conf_mat, pd.DataFrame)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method="true")
conf_mat_expected = np.array(
[[2 / 3.0, 1 / 3.0, 0], [0, 0.5, 0.5], [0.25, 0.25, 0.5]]
)
assert np.array_equal(conf_mat_expected, conf_mat.to_numpy())
assert isinstance(conf_mat, pd.DataFrame)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method="pred")
conf_mat_expected = np.array(
[[2 / 3.0, 1 / 3.0, 0], [0, 1 / 3.0, 1 / 3.0], [1 / 3.0, 1 / 3.0, 2 / 3.0]]
)
assert np.allclose(conf_mat_expected, conf_mat.to_numpy(), equal_nan=True)
assert isinstance(conf_mat, pd.DataFrame)
with pytest.raises(ValueError, match="Invalid value provided"):
conf_mat = confusion_matrix(
y_true, y_predicted, normalize_method="Invalid Option"
)
@pytest.mark.parametrize("data_type", ["ww", "np", "pd"])
def test_normalize_confusion_matrix(data_type, make_data_type):
conf_mat = np.array([[2, 3, 0], [0, 1, 1], [1, 0, 2]])
conf_mat = make_data_type(data_type, conf_mat)
conf_mat_normalized = normalize_confusion_matrix(conf_mat)
assert all(conf_mat_normalized.sum(axis=1) == 1.0)
assert isinstance(conf_mat_normalized, pd.DataFrame)
conf_mat_normalized = normalize_confusion_matrix(conf_mat, "pred")
for col_sum in conf_mat_normalized.sum(axis=0):
assert col_sum == 1.0 or col_sum == 0.0
conf_mat_normalized = normalize_confusion_matrix(conf_mat, "all")
assert conf_mat_normalized.sum().sum() == 1.0
# testing with named pd.DataFrames
conf_mat_df = pd.DataFrame()
conf_mat_df["col_1"] = [0, 1, 2]
conf_mat_df["col_2"] = [0, 0, 3]
conf_mat_df["col_3"] = [2, 0, 0]
conf_mat_normalized = normalize_confusion_matrix(conf_mat_df)
assert all(conf_mat_normalized.sum(axis=1) == 1.0)
assert list(conf_mat_normalized.columns) == ["col_1", "col_2", "col_3"]
conf_mat_normalized = normalize_confusion_matrix(conf_mat_df, "pred")
for col_sum in conf_mat_normalized.sum(axis=0):
assert col_sum == 1.0 or col_sum == 0.0
conf_mat_normalized = normalize_confusion_matrix(conf_mat_df, "all")
assert conf_mat_normalized.sum().sum() == 1.0
@pytest.mark.parametrize("data_type", ["ww", "np", "pd"])
def test_normalize_confusion_matrix_error(data_type, make_data_type):
conf_mat = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
conf_mat = make_data_type(data_type, conf_mat)
warnings.simplefilter("default", category=RuntimeWarning)
with pytest.raises(
ValueError,
match='Invalid value provided for "normalize_method": invalid option',
):
normalize_confusion_matrix(conf_mat, normalize_method="invalid option")
with pytest.raises(ValueError, match="Invalid value provided"):
normalize_confusion_matrix(conf_mat, normalize_method=None)
with pytest.raises(ValueError, match="Sum of given axis is 0"):
normalize_confusion_matrix(conf_mat, "true")
with pytest.raises(ValueError, match="Sum of given axis is 0"):
normalize_confusion_matrix(conf_mat, "pred")
with pytest.raises(ValueError, match="Sum of given axis is 0"):
normalize_confusion_matrix(conf_mat, "all")
@pytest.mark.parametrize("data_type", ["ww", "pd", "np"])
def test_confusion_matrix_labels(data_type, make_data_type):
y_true = np.array([True, False, True, True, False, False])
y_pred = np.array([False, False, True, True, False, False])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = [False, True]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array([0, 1, 0, 1, 0, 1])
y_pred = np.array([0, 1, 1, 1, 1, 1])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = [0, 1]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array(["blue", "red", "blue", "red"])
y_pred = np.array(["blue", "red", "red", "red"])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = ["blue", "red"]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array(["blue", "red", "red", "red", "orange", "orange"])
y_pred = np.array(["red", "blue", "blue", "red", "orange", "orange"])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = ["blue", "orange", "red"]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array([0, 1, 2, 1, 2, 1, 2, 3])
y_pred = np.array([0, 1, 1, 1, 1, 1, 3, 3])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = [0, 1, 2, 3]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
@pytest.fixture
def binarized_ys(X_y_multi):
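    # Binarize the 3-class target and derive mock per-class probabilities from
    # it, so ROC-curve tests get one score column per class.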
_, y_true = X_y_multi
rs = get_random_state(42)
y_tr = label_binarize(y_true, classes=[0, 1, 2])
y_pred_proba = y_tr * rs.random(y_tr.shape)
return y_true, y_tr, y_pred_proba
def test_precision_recall_curve_return_type():
y_true = np.array([0, 0, 1, 1])
y_predict_proba = np.array([0.1, 0.4, 0.35, 0.8])
precision_recall_curve_data = precision_recall_curve(y_true, y_predict_proba)
assert isinstance(precision_recall_curve_data["precision"], np.ndarray)
assert isinstance(precision_recall_curve_data["recall"], np.ndarray)
assert isinstance(precision_recall_curve_data["thresholds"], np.ndarray)
assert isinstance(precision_recall_curve_data["auc_score"], float)
@pytest.mark.parametrize("data_type", ["np", "pd", "pd2d", "li", "ww"])
def test_precision_recall_curve(data_type, make_data_type):
y_true = np.array([0, 0, 1, 1])
y_predict_proba = np.array([0.1, 0.4, 0.35, 0.8])
if data_type == "pd2d":
data_type = "pd"
y_predict_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
y_true = make_data_type(data_type, y_true)
y_predict_proba = make_data_type(data_type, y_predict_proba)
precision_recall_curve_data = precision_recall_curve(y_true, y_predict_proba)
precision = precision_recall_curve_data.get("precision")
recall = precision_recall_curve_data.get("recall")
thresholds = precision_recall_curve_data.get("thresholds")
precision_expected = np.array([0.66666667, 0.5, 1, 1])
recall_expected = np.array([1, 0.5, 0.5, 0])
thresholds_expected = np.array([0.35, 0.4, 0.8])
np.testing.assert_almost_equal(precision_expected, precision, decimal=5)
np.testing.assert_almost_equal(recall_expected, recall, decimal=5)
np.testing.assert_almost_equal(thresholds_expected, thresholds, decimal=5)
def test_precision_recall_curve_pos_label_idx():
y_true = pd.Series(np.array([0, 0, 1, 1]))
y_predict_proba = pd.DataFrame(
np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
)
precision_recall_curve_data = precision_recall_curve(
y_true, y_predict_proba, pos_label_idx=1
)
precision = precision_recall_curve_data.get("precision")
recall = precision_recall_curve_data.get("recall")
thresholds = precision_recall_curve_data.get("thresholds")
precision_expected = np.array([0.66666667, 0.5, 1, 1])
recall_expected = np.array([1, 0.5, 0.5, 0])
thresholds_expected = np.array([0.35, 0.4, 0.8])
np.testing.assert_almost_equal(precision_expected, precision, decimal=5)
np.testing.assert_almost_equal(recall_expected, recall, decimal=5)
np.testing.assert_almost_equal(thresholds_expected, thresholds, decimal=5)
y_predict_proba = pd.DataFrame(
np.array([[0.1, 0.9], [0.4, 0.6], [0.35, 0.65], [0.8, 0.2]])
)
precision_recall_curve_data = precision_recall_curve(
y_true, y_predict_proba, pos_label_idx=0
)
np.testing.assert_almost_equal(precision_expected, precision, decimal=5)
np.testing.assert_almost_equal(recall_expected, recall, decimal=5)
np.testing.assert_almost_equal(thresholds_expected, thresholds, decimal=5)
def test_precision_recall_curve_pos_label_idx_error(make_data_type):
y_true = np.array([0, 0, 1, 1])
y_predict_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
with pytest.raises(
NoPositiveLabelException,
match="Predicted probabilities of shape \\(4, 2\\) don't contain a column at index 9001",
):
precision_recall_curve(y_true, y_predict_proba, pos_label_idx=9001)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_graph_precision_recall_curve(X_y_binary, data_type, make_data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
X = make_data_type(data_type, X)
y_true = make_data_type(data_type, y_true)
fig = graph_precision_recall_curve(y_true, y_pred_proba)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Precision-Recall"
assert len(fig_dict["data"]) == 1
precision_recall_curve_data = precision_recall_curve(y_true, y_pred_proba)
assert np.array_equal(
fig_dict["data"][0]["x"], precision_recall_curve_data["recall"]
)
assert np.array_equal(
fig_dict["data"][0]["y"], precision_recall_curve_data["precision"]
)
assert fig_dict["data"][0]["name"] == "Precision-Recall (AUC {:06f})".format(
precision_recall_curve_data["auc_score"]
)
def test_graph_precision_recall_curve_title_addition(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
fig = graph_precision_recall_curve(
y_true, y_pred_proba, title_addition="with added title text"
)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Precision-Recall with added title text"
)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_roc_curve_binary(data_type, make_data_type):
y_true = np.array([1, 1, 0, 0])
y_predict_proba = np.array([0.1, 0.4, 0.35, 0.8])
y_true = make_data_type(data_type, y_true)
y_predict_proba = make_data_type(data_type, y_predict_proba)
roc_curve_data = roc_curve(y_true, y_predict_proba)[0]
fpr_rates = roc_curve_data.get("fpr_rates")
tpr_rates = roc_curve_data.get("tpr_rates")
thresholds = roc_curve_data.get("thresholds")
auc_score = roc_curve_data.get("auc_score")
fpr_expected = np.array([0, 0.5, 0.5, 1, 1])
tpr_expected = np.array([0, 0, 0.5, 0.5, 1])
thresholds_expected = np.array([1.8, 0.8, 0.4, 0.35, 0.1])
assert np.array_equal(fpr_expected, fpr_rates)
assert np.array_equal(tpr_expected, tpr_rates)
assert np.array_equal(thresholds_expected, thresholds)
assert auc_score == pytest.approx(0.25, 1e-5)
assert isinstance(roc_curve_data["fpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["tpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["thresholds"], np.ndarray)
y_true = np.array([1, 1, 0, 0])
y_predict_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
y_predict_proba = make_data_type(data_type, y_predict_proba)
y_true = make_data_type(data_type, y_true)
roc_curve_data = roc_curve(y_true, y_predict_proba)[0]
fpr_rates = roc_curve_data.get("fpr_rates")
tpr_rates = roc_curve_data.get("tpr_rates")
thresholds = roc_curve_data.get("thresholds")
auc_score = roc_curve_data.get("auc_score")
fpr_expected = np.array([0, 0.5, 0.5, 1, 1])
tpr_expected = np.array([0, 0, 0.5, 0.5, 1])
thresholds_expected = np.array([1.8, 0.8, 0.4, 0.35, 0.1])
assert np.array_equal(fpr_expected, fpr_rates)
assert np.array_equal(tpr_expected, tpr_rates)
assert np.array_equal(thresholds_expected, thresholds)
assert auc_score == pytest.approx(0.25, 1e-5)
assert isinstance(roc_curve_data["fpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["tpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["thresholds"], np.ndarray)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_roc_curve_multiclass(data_type, make_data_type):
y_true = np.array([1, 2, 0, 0, 2, 1])
y_predict_proba = np.array(
[
[0.33, 0.33, 0.33],
[0.05, 0.05, 0.90],
[0.75, 0.15, 0.10],
[0.8, 0.1, 0.1],
[0.1, 0.1, 0.8],
[0.3, 0.4, 0.3],
]
)
y_true = make_data_type(data_type, y_true)
y_predict_proba = make_data_type(data_type, y_predict_proba)
roc_curve_data = roc_curve(y_true, y_predict_proba)
fpr_expected = [[0, 0, 0, 1], [0, 0, 0, 0.25, 0.75, 1], [0, 0, 0, 0.5, 1]]
tpr_expected = [[0, 0.5, 1, 1], [0, 0.5, 1, 1, 1, 1], [0, 0.5, 1, 1, 1]]
thresholds_expected = [
[1.8, 0.8, 0.75, 0.05],
[1.4, 0.4, 0.33, 0.15, 0.1, 0.05],
[1.9, 0.9, 0.8, 0.3, 0.1],
]
auc_expected = [1, 1, 1]
y_true_unique = y_true
if data_type == "ww":
y_true_unique = y_true
for i in np.unique(y_true_unique):
fpr_rates = roc_curve_data[i].get("fpr_rates")
tpr_rates = roc_curve_data[i].get("tpr_rates")
thresholds = roc_curve_data[i].get("thresholds")
auc_score = roc_curve_data[i].get("auc_score")
assert np.array_equal(fpr_expected[i], fpr_rates)
assert np.array_equal(tpr_expected[i], tpr_rates)
assert np.array_equal(thresholds_expected[i], thresholds)
assert auc_expected[i] == pytest.approx(auc_score, 1e-5)
assert isinstance(roc_curve_data[i]["fpr_rates"], np.ndarray)
assert isinstance(roc_curve_data[i]["tpr_rates"], np.ndarray)
assert isinstance(roc_curve_data[i]["thresholds"], np.ndarray)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_graph_roc_curve_binary(X_y_binary, data_type, make_data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
y_true = make_data_type(data_type, y_true)
y_pred_proba = make_data_type(data_type, y_pred_proba)
fig = graph_roc_curve(y_true, y_pred_proba)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Receiver Operating Characteristic"
assert len(fig_dict["data"]) == 2
roc_curve_data = roc_curve(y_true, y_pred_proba)[0]
assert np.array_equal(fig_dict["data"][0]["x"], roc_curve_data["fpr_rates"])
assert np.array_equal(fig_dict["data"][0]["y"], roc_curve_data["tpr_rates"])
assert np.allclose(
np.array(fig_dict["data"][0]["text"]).astype(float),
np.array(roc_curve_data["thresholds"]).astype(float),
)
assert fig_dict["data"][0]["name"] == "Class 1 (AUC {:06f})".format(
roc_curve_data["auc_score"]
)
assert np.array_equal(fig_dict["data"][1]["x"], np.array([0, 1]))
assert np.array_equal(fig_dict["data"][1]["y"], np.array([0, 1]))
assert fig_dict["data"][1]["name"] == "Trivial Model (AUC 0.5)"
def test_graph_roc_curve_nans():
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
one_val_y_zero = np.array([0])
with pytest.warns(UndefinedMetricWarning):
fig = graph_roc_curve(one_val_y_zero, one_val_y_zero)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert np.array_equal(fig_dict["data"][0]["x"], np.array([0.0, 1.0]))
assert np.allclose(
fig_dict["data"][0]["y"], np.array([np.nan, np.nan]), equal_nan=True
)
fig1 = graph_roc_curve(
np.array([np.nan, 1, 1, 0, 1]), np.array([0, 0, 0.5, 0.1, 0.9])
)
fig2 = graph_roc_curve(
np.array([1, 0, 1, 0, 1]), np.array([0, np.nan, 0.5, 0.1, 0.9])
)
assert fig1 == fig2
def test_graph_roc_curve_multiclass(binarized_ys):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true, y_tr, y_pred_proba = binarized_ys
fig = graph_roc_curve(y_true, y_pred_proba)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Receiver Operating Characteristic"
assert len(fig_dict["data"]) == 4
for i in range(3):
roc_curve_data = roc_curve(y_tr[:, i], y_pred_proba[:, i])[0]
assert np.array_equal(fig_dict["data"][i]["x"], roc_curve_data["fpr_rates"])
assert np.array_equal(fig_dict["data"][i]["y"], roc_curve_data["tpr_rates"])
assert np.allclose(
np.array(fig_dict["data"][i]["text"]).astype(float),
np.array(roc_curve_data["thresholds"]).astype(float),
)
assert fig_dict["data"][i]["name"] == "Class {name} (AUC {:06f})".format(
roc_curve_data["auc_score"], name=i + 1
)
assert np.array_equal(fig_dict["data"][3]["x"], np.array([0, 1]))
assert np.array_equal(fig_dict["data"][3]["y"], np.array([0, 1]))
assert fig_dict["data"][3]["name"] == "Trivial Model (AUC 0.5)"
with pytest.raises(
ValueError,
match="Number of custom class names does not match number of classes",
):
graph_roc_curve(y_true, y_pred_proba, custom_class_names=["one", "two"])
def test_graph_roc_curve_multiclass_custom_class_names(binarized_ys):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true, y_tr, y_pred_proba = binarized_ys
custom_class_names = ["one", "two", "three"]
fig = graph_roc_curve(y_true, y_pred_proba, custom_class_names=custom_class_names)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Receiver Operating Characteristic"
for i in range(3):
roc_curve_data = roc_curve(y_tr[:, i], y_pred_proba[:, i])[0]
assert np.array_equal(fig_dict["data"][i]["x"], roc_curve_data["fpr_rates"])
assert np.array_equal(fig_dict["data"][i]["y"], roc_curve_data["tpr_rates"])
assert fig_dict["data"][i]["name"] == "Class {name} (AUC {:06f})".format(
roc_curve_data["auc_score"], name=custom_class_names[i]
)
assert np.array_equal(fig_dict["data"][3]["x"], np.array([0, 1]))
assert np.array_equal(fig_dict["data"][3]["y"], np.array([0, 1]))
assert fig_dict["data"][3]["name"] == "Trivial Model (AUC 0.5)"
def test_graph_roc_curve_title_addition(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
fig = graph_roc_curve(y_true, y_pred_proba, title_addition="with added title text")
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"]
== "Receiver Operating Characteristic with added title text"
)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_graph_confusion_matrix_default(X_y_binary, data_type, make_data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred = np.round(y_true * rs.random(y_true.shape)).astype(int)
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
fig = graph_confusion_matrix(y_true, y_pred)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"]
== 'Confusion matrix, normalized using method "true"'
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Predicted Label"
assert np.all(fig_dict["layout"]["xaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "True Label"
assert np.all(fig_dict["layout"]["yaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["autorange"] == "reversed"
heatmap = fig_dict["data"][0]
conf_mat = confusion_matrix(y_true, y_pred, normalize_method="true")
conf_mat_unnormalized = confusion_matrix(y_true, y_pred, normalize_method=None)
assert np.array_equal(heatmap["x"], conf_mat.columns)
assert np.array_equal(heatmap["y"], conf_mat.columns)
assert np.array_equal(heatmap["z"], conf_mat)
assert np.array_equal(heatmap["customdata"], conf_mat_unnormalized)
assert (
heatmap["hovertemplate"]
== "<b>True</b>: %{y}<br><b>Predicted</b>: %{x}<br><b>Normalized Count</b>: %{z}<br><b>Raw Count</b>: %{customdata} <br><extra></extra>"
)
annotations = fig.__dict__["_layout_obj"]["annotations"]
# check that the figure has text annotations for the confusion matrix
for i in range(len(annotations)):
assert "text" in annotations[i]
def test_graph_confusion_matrix_norm_disabled(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred = np.round(y_true * rs.random(y_true.shape)).astype(int)
fig = graph_confusion_matrix(y_true, y_pred, normalize_method=None)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Confusion matrix"
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Predicted Label"
assert np.all(fig_dict["layout"]["xaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "True Label"
assert np.all(fig_dict["layout"]["yaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["autorange"] == "reversed"
heatmap = fig_dict["data"][0]
conf_mat = confusion_matrix(y_true, y_pred, normalize_method=None)
conf_mat_normalized = confusion_matrix(y_true, y_pred, normalize_method="true")
assert np.array_equal(heatmap["x"], conf_mat.columns)
assert np.array_equal(heatmap["y"], conf_mat.columns)
assert np.array_equal(heatmap["z"], conf_mat)
assert np.array_equal(heatmap["customdata"], conf_mat_normalized)
assert (
heatmap["hovertemplate"]
== "<b>True</b>: %{y}<br><b>Predicted</b>: %{x}<br><b>Raw Count</b>: %{z}<br><b>Normalized Count</b>: %{customdata} <br><extra></extra>"
)
def test_graph_confusion_matrix_title_addition(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred = np.round(y_true * rs.random(y_true.shape)).astype(int)
fig = graph_confusion_matrix(y_true, y_pred, title_addition="with added title text")
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"]
== 'Confusion matrix with added title text, normalized using method "true"'
)
def test_graph_permutation_importance(X_y_binary, test_pipeline):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
clf = test_pipeline
clf.fit(X, y)
fig = graph_permutation_importance(test_pipeline, X, y, "Log Loss Binary")
assert isinstance(fig, go.Figure)
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Permutation Importance<br><sub>"
"The relative importance of each input feature's overall "
"influence on the pipelines' predictions, computed using the "
"permutation importance algorithm.</sub>"
)
assert len(fig_dict["data"]) == 1
perm_importance_data = calculate_permutation_importance(
clf, X, y, "Log Loss Binary"
)
assert np.array_equal(
fig_dict["data"][0]["x"][::-1], perm_importance_data["importance"].values
)
assert np.array_equal(
fig_dict["data"][0]["y"][::-1], perm_importance_data["feature"]
)
@patch("evalml.model_understanding.graphs.calculate_permutation_importance")
def test_graph_permutation_importance_show_all_features(mock_perm_importance):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
mock_perm_importance.return_value = pd.DataFrame(
{"feature": ["f1", "f2"], "importance": [0.0, 0.6]}
)
figure = graph_permutation_importance(
test_pipeline, pd.DataFrame(), pd.Series(), "Log Loss Binary"
)
assert isinstance(figure, go.Figure)
data = figure.data[0]
assert np.any(data["x"] == 0.0)
@patch("evalml.model_understanding.graphs.calculate_permutation_importance")
def test_graph_permutation_importance_threshold(mock_perm_importance):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
mock_perm_importance.return_value = pd.DataFrame(
{"feature": ["f1", "f2"], "importance": [0.0, 0.6]}
)
with pytest.raises(
ValueError,
match="Provided importance threshold of -0.1 must be greater than or equal to 0",
):
fig = graph_permutation_importance(
test_pipeline,
pd.DataFrame(),
pd.Series(),
"Log Loss Binary",
importance_threshold=-0.1,
)
fig = graph_permutation_importance(
test_pipeline,
pd.DataFrame(),
pd.Series(),
"Log Loss Binary",
importance_threshold=0.5,
)
assert isinstance(fig, go.Figure)
data = fig.data[0]
assert np.all(data["x"] >= 0.5)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_cost_benefit_matrix_vs_threshold(
data_type, X_y_binary, logistic_regression_binary_pipeline_class, make_data_type
):
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
pipeline.fit(X, y)
original_pipeline_threshold = pipeline.threshold
cost_benefit_df = binary_objective_vs_threshold(pipeline, X, y, cbm, steps=5)
assert list(cost_benefit_df.columns) == ["threshold", "score"]
assert cost_benefit_df.shape == (6, 2)
assert not cost_benefit_df.isnull().all().all()
assert pipeline.threshold == original_pipeline_threshold
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_binary_objective_vs_threshold(
data_type, X_y_binary, logistic_regression_binary_pipeline_class, make_data_type
):
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
pipeline.fit(X, y)
# test objective with score_needs_proba == True
with pytest.raises(ValueError, match="Objective `score_needs_proba` must be False"):
binary_objective_vs_threshold(pipeline, X, y, "Log Loss Binary")
# test with non-binary objective
with pytest.raises(
ValueError, match="can only be calculated for binary classification objectives"
):
binary_objective_vs_threshold(pipeline, X, y, "f1 micro")
# test objective with score_needs_proba == False
results_df = binary_objective_vs_threshold(pipeline, X, y, "f1", steps=5)
assert list(results_df.columns) == ["threshold", "score"]
assert results_df.shape == (6, 2)
assert not results_df.isnull().all().all()
@patch("evalml.pipelines.BinaryClassificationPipeline.score")
def test_binary_objective_vs_threshold_steps(
mock_score, X_y_binary, logistic_regression_binary_pipeline_class
):
X, y = X_y_binary
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
pipeline.fit(X, y)
mock_score.return_value = {"Cost Benefit Matrix": 0.2}
cost_benefit_df = binary_objective_vs_threshold(pipeline, X, y, cbm, steps=234)
mock_score.assert_called()
assert list(cost_benefit_df.columns) == ["threshold", "score"]
assert cost_benefit_df.shape == (235, 2)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
@patch("evalml.model_understanding.graphs.binary_objective_vs_threshold")
def test_graph_binary_objective_vs_threshold(
mock_cb_thresholds,
data_type,
X_y_binary,
logistic_regression_binary_pipeline_class,
make_data_type,
):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
mock_cb_thresholds.return_value = pd.DataFrame(
{"threshold": [0, 0.5, 1.0], "score": [100, -20, 5]}
)
figure = graph_binary_objective_vs_threshold(pipeline, X, y, cbm)
assert isinstance(figure, go.Figure)
data = figure.data[0]
assert not np.any(np.isnan(data["x"]))
assert not np.any(np.isnan(data["y"]))
assert np.array_equal(data["x"], mock_cb_thresholds.return_value["threshold"])
assert np.array_equal(data["y"], mock_cb_thresholds.return_value["score"])
@patch("evalml.model_understanding.graphs.jupyter_check")
@patch("evalml.model_understanding.graphs.import_or_raise")
def test_jupyter_graph_check(
import_check, jupyter_check, X_y_binary, X_y_regression, test_pipeline
):
pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
X = X[:20, :5]
y = y[:20]
clf = test_pipeline
clf.fit(X, y)
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
graph_permutation_importance(test_pipeline, X, y, "log loss binary")
assert len(graph_valid) == 0
with pytest.warns(None) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0
jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
graph_partial_dependence(clf, X, features=0, grid_resolution=20)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
graph_binary_objective_vs_threshold(test_pipeline, X, y, cbm, steps=5)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_precision_recall_curve(y, y_pred_proba)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
graph_permutation_importance(test_pipeline, X, y, "log loss binary")
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_roc_curve(y, y_pred_proba)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
Xr, yr = X_y_regression
with pytest.warns(None) as graph_valid:
rs = get_random_state(42)
y_preds = yr * rs.random(yr.shape)
graph_prediction_vs_actual(yr, y_preds)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_get_prediction_vs_actual_data(data_type, make_data_type):
y_true = np.array([1, 2, 3000, 4, 5, 6, 7, 8, 9, 10, 11, 12])
y_pred = np.array([5, 4, 2, 8, 6, 6, 5, 1, 7, 2, 1, 3000])
y_true_in = make_data_type(data_type, y_true)
y_pred_in = make_data_type(data_type, y_pred)
with pytest.raises(ValueError, match="Threshold must be positive!"):
get_prediction_vs_actual_data(y_true_in, y_pred_in, outlier_threshold=-1)
outlier_loc = [2, 11]
results = get_prediction_vs_actual_data(
y_true_in, y_pred_in, outlier_threshold=2000
)
assert isinstance(results, pd.DataFrame)
assert np.array_equal(results["prediction"], y_pred)
assert np.array_equal(results["actual"], y_true)
for i, value in enumerate(results["outlier"]):
if i in outlier_loc:
assert value == "#ffff00"
else:
assert value == "#0000ff"
results = get_prediction_vs_actual_data(y_true_in, y_pred_in)
assert isinstance(results, pd.DataFrame)
assert np.array_equal(results["prediction"], y_pred)
assert np.array_equal(results["actual"], y_true)
assert (results["outlier"] == "#0000ff").all()
def test_graph_prediction_vs_actual_default():
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true = [1, 2, 3000, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_pred = [5, 4, 2, 8, 6, 6, 5, 1, 7, 2, 1, 3000]
fig = graph_prediction_vs_actual(y_true, y_pred)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Predicted vs Actual Values Scatter Plot"
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Prediction"
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "Actual"
assert len(fig_dict["data"]) == 2
assert fig_dict["data"][0]["name"] == "y = x line"
assert fig_dict["data"][0]["x"] == fig_dict["data"][0]["y"]
assert len(fig_dict["data"][1]["x"]) == len(y_true)
assert fig_dict["data"][1]["marker"]["color"] == "#0000ff"
assert fig_dict["data"][1]["name"] == "Values"
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_graph_prediction_vs_actual(data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_pred = [5, 4, 3, 8, 6, 3, 5, 9, 7, 12, 1, 2]
with pytest.raises(ValueError, match="Threshold must be positive!"):
graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=-1)
fig = graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=100)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Predicted vs Actual Values Scatter Plot"
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Prediction"
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "Actual"
assert len(fig_dict["data"]) == 2
assert fig_dict["data"][1]["marker"]["color"] == "#0000ff"
y_true = pd.Series(y_true)
y_pred = pd.Series(y_pred)
if data_type == "ww":
y_true = ww.init_series(y_true)
y_pred = ww.init_series(y_pred)
fig = graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=6.1)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Predicted vs Actual Values Scatter Plot"
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Prediction"
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "Actual"
assert len(fig_dict["data"]) == 3
assert fig_dict["data"][1]["marker"]["color"] == "#0000ff"
assert len(fig_dict["data"][1]["x"]) == 10
assert len(fig_dict["data"][1]["y"]) == 10
assert fig_dict["data"][1]["name"] == "< outlier_threshold"
assert fig_dict["data"][2]["marker"]["color"] == "#ffff00"
assert len(fig_dict["data"][2]["x"]) == 2
assert len(fig_dict["data"][2]["y"]) == 2
assert fig_dict["data"][2]["name"] == ">= outlier_threshold"
def test_get_prediction_vs_actual_over_time_data(ts_data):
X, y = ts_data
X_train, y_train = X.iloc[:15], y.iloc[:15]
X_test, y_test = X.iloc[15:], y.iloc[15:]
pipeline = TimeSeriesRegressionPipeline(
["Elastic Net Regressor"],
parameters={
"pipeline": {
"gap": 0,
"max_delay": 2,
"forecast_horizon": 1,
"date_index": None,
}
},
)
pipeline.fit(X_train, y_train)
results = get_prediction_vs_actual_over_time_data(
pipeline, X_test, y_test, X_train, y_train, pd.Series(X_test.index)
)
assert isinstance(results, pd.DataFrame)
assert list(results.columns) == ["dates", "target", "prediction"]
def test_graph_prediction_vs_actual_over_time(ts_data):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = ts_data
X_train, y_train = X.iloc[:15], y.iloc[:15]
X_test, y_test = X.iloc[15:], y.iloc[15:]
pipeline = TimeSeriesRegressionPipeline(
["Elastic Net Regressor"],
parameters={
"pipeline": {
"gap": 0,
"max_delay": 2,
"forecast_horizon": 1,
"date_index": None,
}
},
)
pipeline.fit(X_train, y_train)
fig = graph_prediction_vs_actual_over_time(
pipeline, X_test, y_test, X_train, y_train, pd.Series(X_test.index)
)
assert isinstance(fig, go.Figure)
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Prediction vs Target over time"
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Time"
assert (
fig_dict["layout"]["yaxis"]["title"]["text"] == "Target Values and Predictions"
)
assert len(fig_dict["data"]) == 2
assert fig_dict["data"][0]["line"]["color"] == "#1f77b4"
assert len(fig_dict["data"][0]["x"]) == X_test.shape[0]
assert not np.isnan(fig_dict["data"][0]["y"]).all()
assert len(fig_dict["data"][0]["y"]) == X_test.shape[0]
assert fig_dict["data"][1]["line"]["color"] == "#d62728"
assert len(fig_dict["data"][1]["x"]) == X_test.shape[0]
assert len(fig_dict["data"][1]["y"]) == X_test.shape[0]
assert not np.isnan(fig_dict["data"][1]["y"]).all()
def test_graph_prediction_vs_actual_over_time_value_error():
pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
class NotTSPipeline:
problem_type = ProblemTypes.REGRESSION
error_msg = "graph_prediction_vs_actual_over_time only supports time series regression pipelines! Received regression."
with pytest.raises(ValueError, match=error_msg):
graph_prediction_vs_actual_over_time(
NotTSPipeline(), None, None, None, None, None
)
def test_decision_tree_data_from_estimator_not_fitted(tree_estimators):
est_class, _ = tree_estimators
with pytest.raises(
NotFittedError,
match="This DecisionTree estimator is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator.",
):
decision_tree_data_from_estimator(est_class)
def test_decision_tree_data_from_estimator_wrong_type(logit_estimator):
est_logit = logit_estimator
with pytest.raises(
ValueError,
match="Tree structure reformatting is only supported for decision tree estimators",
):
decision_tree_data_from_estimator(est_logit)
def test_decision_tree_data_from_estimator(fitted_tree_estimators):
est_class, est_reg = fitted_tree_estimators
formatted_ = decision_tree_data_from_estimator(est_reg)
tree_ = est_reg._component_obj.tree_
assert isinstance(formatted_, OrderedDict)
assert formatted_["Feature"] == f"Testing_{tree_.feature[0]}"
assert formatted_["Threshold"] == tree_.threshold[0]
assert all([a == b for a, b in zip(formatted_["Value"][0], tree_.value[0][0])])
left_child_feature_ = formatted_["Left_Child"]["Feature"]
right_child_feature_ = formatted_["Right_Child"]["Feature"]
left_child_threshold_ = formatted_["Left_Child"]["Threshold"]
right_child_threshold_ = formatted_["Right_Child"]["Threshold"]
left_child_value_ = formatted_["Left_Child"]["Value"]
right_child_value_ = formatted_["Right_Child"]["Value"]
assert left_child_feature_ == f"Testing_{tree_.feature[tree_.children_left[0]]}"
assert right_child_feature_ == f"Testing_{tree_.feature[tree_.children_right[0]]}"
assert left_child_threshold_ == tree_.threshold[tree_.children_left[0]]
assert right_child_threshold_ == tree_.threshold[tree_.children_right[0]]
# Check that the immediate left and right child of the root node have the correct values
assert all(
[
a == b
for a, b in zip(
left_child_value_[0], tree_.value[tree_.children_left[0]][0]
)
]
)
assert all(
[
a == b
for a, b in zip(
right_child_value_[0], tree_.value[tree_.children_right[0]][0]
)
]
)
def test_decision_tree_data_from_pipeline_not_fitted():
mock_pipeline = MulticlassClassificationPipeline(
component_graph=["Decision Tree Classifier"]
)
with pytest.raises(
NotFittedError,
match="The DecisionTree estimator associated with this pipeline is not fitted yet. "
"Call 'fit' with appropriate arguments before using this estimator.",
):
decision_tree_data_from_pipeline(mock_pipeline)
def test_decision_tree_data_from_pipeline_wrong_type():
mock_pipeline = MulticlassClassificationPipeline(
component_graph=["Logistic Regression Classifier"]
)
with pytest.raises(
ValueError,
match="Tree structure reformatting is only supported for decision tree estimators",
):
decision_tree_data_from_pipeline(mock_pipeline)
def test_decision_tree_data_from_pipeline_feature_length(X_y_categorical_regression):
mock_pipeline = RegressionPipeline(
component_graph=["One Hot Encoder", "Imputer", "Decision Tree Regressor"]
)
X, y = X_y_categorical_regression
mock_pipeline.fit(X, y)
assert (
len(mock_pipeline.input_feature_names[mock_pipeline.estimator.name])
== mock_pipeline.estimator._component_obj.n_features_
)
def test_decision_tree_data_from_pipeline(X_y_categorical_regression):
mock_pipeline = RegressionPipeline(
component_graph=["One Hot Encoder", "Imputer", "Decision Tree Regressor"]
)
X, y = X_y_categorical_regression
mock_pipeline.fit(X, y)
formatted_ = decision_tree_data_from_pipeline(mock_pipeline)
tree_ = mock_pipeline.estimator._component_obj.tree_
feature_names = mock_pipeline.input_feature_names[mock_pipeline.estimator.name]
assert isinstance(formatted_, OrderedDict)
assert formatted_["Feature"] == feature_names[tree_.feature[0]]
assert formatted_["Threshold"] == tree_.threshold[0]
assert all([a == b for a, b in zip(formatted_["Value"][0], tree_.value[0][0])])
left_child_feature_ = formatted_["Left_Child"]["Feature"]
right_child_feature_ = formatted_["Right_Child"]["Feature"]
left_child_threshold_ = formatted_["Left_Child"]["Threshold"]
right_child_threshold_ = formatted_["Right_Child"]["Threshold"]
left_child_value_ = formatted_["Left_Child"]["Value"]
right_child_value_ = formatted_["Right_Child"]["Value"]
assert left_child_feature_ == feature_names[tree_.feature[tree_.children_left[0]]]
assert right_child_feature_ == feature_names[tree_.feature[tree_.children_right[0]]]
assert left_child_threshold_ == tree_.threshold[tree_.children_left[0]]
assert right_child_threshold_ == tree_.threshold[tree_.children_right[0]]
# Check that the immediate left and right child of the root node have the correct values
assert all(
[
a == b
for a, b in zip(
left_child_value_[0], tree_.value[tree_.children_left[0]][0]
)
]
)
assert all(
[
a == b
for a, b in zip(
right_child_value_[0], tree_.value[tree_.children_right[0]][0]
)
]
)
def test_visualize_decision_trees_filepath(fitted_tree_estimators, tmpdir):
graphviz = pytest.importorskip(
"graphviz", reason="Skipping visualizing test because graphviz not installed"
)
est_class, _ = fitted_tree_estimators
filepath = os.path.join(str(tmpdir), "invalid", "path", "test.png")
assert not os.path.exists(filepath)
with pytest.raises(ValueError, match="Specified filepath is not writeable"):
visualize_decision_tree(estimator=est_class, filepath=filepath)
filepath = os.path.join(str(tmpdir), "test_0.png")
src = visualize_decision_tree(estimator=est_class, filepath=filepath)
assert os.path.exists(filepath)
assert src.format == "png"
assert isinstance(src, graphviz.Source)
def test_visualize_decision_trees_wrong_format(fitted_tree_estimators, tmpdir):
graphviz = pytest.importorskip(
"graphviz", reason="Skipping visualizing test because graphviz not installed"
)
est_class, _ = fitted_tree_estimators
filepath = os.path.join(str(tmpdir), "test_0.xyz")
with pytest.raises(
ValueError,
match=f"Unknown format 'xyz'. Make sure your format is one of the following: "
f"{graphviz.backend.FORMATS}",
):
visualize_decision_tree(estimator=est_class, filepath=filepath)
def test_visualize_decision_trees_est_wrong_type(logit_estimator, tmpdir):
est_logit = logit_estimator
filepath = os.path.join(str(tmpdir), "test_1.png")
with pytest.raises(
ValueError,
match="Tree visualizations are only supported for decision tree estimators",
):
visualize_decision_tree(estimator=est_logit, filepath=filepath)
def test_visualize_decision_trees_max_depth(tree_estimators, tmpdir):
est_class, _ = tree_estimators
filepath = os.path.join(str(tmpdir), "test_1.png")
with pytest.raises(
ValueError,
match="Unknown value: '-1'. The parameter max_depth has to be a non-negative integer",
):
visualize_decision_tree(estimator=est_class, max_depth=-1, filepath=filepath)
def test_visualize_decision_trees_not_fitted(tree_estimators, tmpdir):
est_class, _ = tree_estimators
filepath = os.path.join(str(tmpdir), "test_1.png")
with pytest.raises(
NotFittedError,
match="This DecisionTree estimator is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator.",
):
visualize_decision_tree(estimator=est_class, max_depth=3, filepath=filepath)
def test_visualize_decision_trees(fitted_tree_estimators, tmpdir):
graphviz = pytest.importorskip(
"graphviz", reason="Skipping visualizing test because graphviz not installed"
)
est_class, est_reg = fitted_tree_estimators
filepath = os.path.join(str(tmpdir), "test_2")
src = visualize_decision_tree(
estimator=est_class, filled=True, max_depth=3, rotate=True, filepath=filepath
)
assert src.format == "pdf" # Check that extension defaults to pdf
assert isinstance(src, graphviz.Source)
filepath = os.path.join(str(tmpdir), "test_3.pdf")
src = visualize_decision_tree(estimator=est_reg, filled=True, filepath=filepath)
assert src.format == "pdf"
assert isinstance(src, graphviz.Source)
src = visualize_decision_tree(estimator=est_reg, filled=True, max_depth=2)
assert src.format == "pdf"
assert isinstance(src, graphviz.Source)
def test_linear_coefficients_errors():
dt = DecisionTreeRegressor()
with pytest.raises(
ValueError,
match="Linear coefficients are only available for linear family models",
):
get_linear_coefficients(dt)
lin = LinearRegressor()
with pytest.raises(ValueError, match="This linear estimator is not fitted yet."):
get_linear_coefficients(lin)
@pytest.mark.parametrize("estimator", [LinearRegressor, ElasticNetRegressor])
def test_linear_coefficients_output(estimator):
X = pd.DataFrame(
[[1, 2, 3, 5], [3, 5, 2, 1], [5, 2, 2, 2], [3, 2, 3, 3]],
columns=["First", "Second", "Third", "Fourth"],
)
    y = pd.Series([2, 1, 3, 4])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
    Frequency Generator with log-spaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
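# --- Illustrative usage sketch (editorial addition, not part of the original PyEIS code) ---
# Generates a log-spaced frequency sweep from 1 MHz down to 0.1 Hz; the start, stop and
# points-per-decade values are assumptions chosen only to demonstrate the call.
def _example_freq_gen():
    f_range, w_range = freq_gen(f_start=10**6, f_stop=0.1, pts_decade=10)
    return f_range, w_range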
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
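# --- Illustrative consistency check (editorial addition, not part of the original PyEIS code) ---
# With n = 1 the constant phase element reduces to an ideal capacitor, so elem_Q and elem_C
# should agree; the Q/C value below is an assumed example value.
def _example_cpe_reduces_to_capacitor():
    _, w_range = freq_gen(f_start=10**5, f_stop=1, pts_decade=5)
    return np.allclose(elem_Q(w_range, Q=1e-6, n=1), elem_C(w_range, C=1e-6))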
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
    Return the impedance of an RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
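# --- Illustrative simulation sketch (editorial addition, not part of the original PyEIS code) ---
# Evaluates the -Rs-RQ- circuit over a frequency sweep and returns the real and imaginary
# parts, e.g. for a Nyquist plot. All parameter values are assumptions for demonstration.
def _example_cir_RsRQ():
    _, w_range = freq_gen(f_start=10**6, f_stop=0.1, pts_decade=10)
    Z = cir_RsRQ(w=w_range, Rs=20, R=100, Q=10**-5, n=0.9)
    return Z.real, Z.imag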
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using RQ definitions where n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = Summit frequency of RQ circuit [Hz]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = summit frequency of RQ circuit [Hz]
    C = Capacitance of series C [F]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
    R1 = Resistance in (RC) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    C = Capacitance of series C [F]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
    R1 = Resistance in (RC) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
Two cases: a) ox and red are both present in solution here both Cred and Dred are defined, b) In the particular case where initially
only Ox species are present in the solution with bulk concentration C*_ox, the surface concentrations may be calculated as function
of the electrode potential following Nernst equation. Here C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Bulk concentration of oxidized species [mol/cm3]
    C_red = Bulk concentration of reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
else:
        raise ValueError('Define either C_red and D_red, or E and E0, so that sigma can be calculated')
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
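# --- Illustrative usage sketch (editorial addition, not part of the original PyEIS code) ---
# Computes the Warburg contribution for a one-electron reaction with both the oxidized and
# reduced species present in solution (case a above); all numbers are assumed example values
# in the units documented in the docstring.
def _example_Randles_coeff():
    _, w_range = freq_gen(f_start=10**4, f_stop=0.1, pts_decade=10)
    return Randles_coeff(w_range, n_electron=1, A=1.0, D_red=10**-6, D_ox=10**-6,
                         C_red=10**-5, C_ox=10**-5)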
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg constant
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Concentration of oxidized species [mol/cm3]
    C_red = Concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with a simplified Warburg element
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
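# --- Illustrative simulation sketch (editorial addition, not part of the original PyEIS code) ---
# Simplified Randles circuit with the Warburg term expressed through the sigma coefficient;
# the parameter values are assumptions chosen only to demonstrate the call signature.
def _example_cir_Randles_simplified():
    _, w_range = freq_gen(f_start=10**5, f_stop=0.1, pts_decade=10)
    return cir_Randles_simplified(w=w_range, Rs=10, R=250, n=0.8, sigma=50, Q=10**-4)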
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in the bulk that gives a capacitance contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
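# --- Illustrative simulation sketch (editorial addition, not part of the original PyEIS code) ---
# Blocking electrode with a polymer electrolyte: interfacial capacitance in series with the
# bulk (RC) element. The capacitance and resistance values are assumed example values.
def _example_cir_C_RC_C():
    _, w_range = freq_gen(f_start=10**6, f_stop=1, pts_decade=10)
    return cir_C_RC_C(w_range, Ce=10**-6, Cb=10**-9, Rb=10**4)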
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
Modified cir_C_RC_C() circuits that can be used if electrodes and bulk are not behaving like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
    Qe = Interfacial capacitance modeled with a CPE [s^n/ohm]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives errors when tanh becomes very large, above 10^250, this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte that contains both a mobile and an immobile ionic species in the bulk, mixed with an
    ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedance -(RC_e)-, the ionically conducting polymer bulk -(RC_b)-,
    and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
    As numpy gives errors when sinh becomes very large, above 10^250, this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
    As numpy gives errors when coth becomes very large, above 10^250, this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
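# --- Illustrative numerical check (editorial addition, not part of the original PyEIS code) ---
# For moderate complex arguments the overflow-safe helpers agree with the exact hyperbolic
# functions, and for large real parts they remain finite where np.sinh/np.cosh would overflow.
def _example_overflow_safe_hyperbolics():
    x = np.array([0.5 + 0.5j, 2.0 + 1.0j])
    agree_sinh = np.allclose(sinh(x), np.sinh(x))
    agree_coth = np.allclose(coth(x), 1/np.tanh(x))
    finite_for_large_x = np.isfinite(coth(np.array([400.0 + 1.0j]))).all()
    return agree_sinh, agree_coth, finite_for_large_x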
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
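# --- Illustrative simulation sketch (editorial addition, not part of the original PyEIS code) ---
# Full transmission line for a blocking porous electrode, including both the ionic (Ri) and
# electronic (Rel) rails. All parameter values are assumptions for demonstration only.
def _example_cir_RsTLQ():
    _, w_range = freq_gen(f_start=10**4, f_stop=0.1, pts_decade=5)
    return cir_RsTLQ(w=w_range, L=10**-3, Rs=10, Q=10**-4, n=0.9, Rel=50, Ri=100)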
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
    n = Exponent for interfacial RQ element [-]
    Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = Electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
    n2 = exponent for interfacial RQ element [-]
    Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
    Q = Constant phase element of the internal RQ element in the modified Randles element of a particle [s^n/ohm]
    n = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
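# --- Illustrative simulation sketch (editorial addition, not part of the original PyEIS code) ---
# Transmission line with a 1D solid-state Warburg element inside the particle impedance.
# The geometry, diffusion coefficient and circuit values below are assumed example values.
def _example_cir_RsTL_1Dsolid():
    _, w_range = freq_gen(f_start=10**4, f_stop=0.01, pts_decade=5)
    return cir_RsTL_1Dsolid(w=w_range, L=10**-3, D=10**-10, radius=5*10**-5, Rs=10, R=50,
                            Q=10**-4, n=0.9, R_w=100, n_w=0.5, Rel=50, Ri=100)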
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
fs1 = max frequency peak of the interfacial RQ element[Hz]
    n1 = exponent for interfacial RQ element [-]
    R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = Constant phase element of the internal RQ element in the modified Randles element of a particle [s^n/ohm]
    n2 = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
    #The (RQ) circuit in series with the transmission line
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
'''
Fit Function: -RC-
    Returns the impedance of an RC circuit, using RQ definitions where n=1
'''
    if str(params.keys())[10:].find("R") == -1: #if R == 'none':
        C = params['C']
        fs = params['fs']
        R = (1/(C*(2*np.pi*fs)))
    if str(params.keys())[10:].find("C") == -1: #elif C == 'none':
        R = params['R']
        fs = params['fs']
        C = (1/(R*(2*np.pi*fs)))
    if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
        R = params['R']
        C = params['C']
    # n is fixed to 1 for an ideal RC element, so no separate n branch is needed
    return cir_RQ(w, R=R, Q=C, n=1, fs='none')
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
    Z(w) = R / (1 + R*Q*(w*1j)^n)
    See explanation of the equations under cir_RQ()
    str(params.keys())[10:] is searched for the user-defined parameter names; if a name is not found (find() == -1), that parameter is treated as 'none' and is calculated from the remaining parameters
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
    Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
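# --- Illustrative usage sketch (editorial addition, not part of the original PyEIS code) ---
# Shows how the *_fit functions read an lmfit Parameters object: parameters are looked up
# by name, and the branch matching the omitted quantity (here fs) is selected via the
# str(params.keys()) search. The numerical values are assumptions for demonstration only.
def _example_cir_RsRQ_fit():
    params = Parameters()
    params.add('Rs', value=20)
    params.add('R', value=100)
    params.add('Q', value=10**-5)
    params.add('n', value=0.9)
    _, w_range = freq_gen(f_start=10**6, f_stop=0.1, pts_decade=10)
    return cir_RsRQ_fit(params, w_range)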
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
    # Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
        Re = params['Re']
        fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
    # Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
    Fit Function: -Rs-TLs-
    TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
    See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
    Fit Function: -R-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity including both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
    #The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which includes both Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
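# Note on the commented-out mp.matrix/mp.coth/mp.sinh blocks in the transmission-line
# functions above: they guard against overflow of sinh()/coth() when |x| becomes very
# large or very small. A hedged sketch of how that logic could be factored into one
# reusable helper (assuming mpmath is imported as mp elsewhere in this module, as the
# commented code implies):
#
#   def _coth_sinh_mp(x):
#       # element-wise coth(x) and sinh(x) evaluated with mpmath for numerical safety
#       coth_vals = np.array([complex(mp.coth(xi)) for xi in x])
#       sinh_vals = np.array([complex(mp.sinh(xi)) for xi in x])
#       return coth_vals, sinh_vals
#
#   # usage inside a cir_* function:
#   #   coth_x, sinh_x = _coth_sinh_mp(x)
#   #   Z_TL = ((Rel*Ri)/(Rel+Ri))*(L + 2*Lam/sinh_x) + Lam*((Rel**2 + Ri**2)/(Rel+Ri))*coth_x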
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
Sum of squares error function for the complex non-linear least-squares (CNLS) fitting procedure. lmfit iterates over this function
until the total sum of squared errors is minimized.
During the minimization the fit is weighted; currently three different weighting options are available:
- modulus
- unity
- proportional
Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- w: angular frequency
- re: real impedance
- im: imaginary impedance
- circuit:
The available circuits are shown below; this parameter must be passed as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-Q
- R-RQ-C
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
#Different Weighing options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
unity_1s.append(1) #makes an array of [1]'s, so that the weighting is == 1 * sum of squares.
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
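# Hedged usage sketch: leastsq_errorfunc() is written to be handed to lmfit.minimize(),
# which calls it as fcn(params, *args). The parameter names added below ('Rs', 'R', 'n',
# 'fs') are illustrative assumptions -- they must match whatever the cir_*_fit function
# for the chosen circuit string actually reads.
#
#   from lmfit import Parameters, minimize
#   params = Parameters()
#   params.add('Rs', value=10.0, min=0)
#   params.add('R', value=100.0, min=0)
#   params.add('n', value=0.8, min=0, max=1)
#   params.add('fs', value=50.0, min=0)
#   fit = minimize(leastsq_errorfunc, params, method='leastsq',
#                  args=(w, re, im, 'R-RQ', 'modulus'))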
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1', 'EIS_data2']
- cycle: Specific cycle numbers can be extracted using this parameter. Default is 'off', which includes all cycle numbers.
To extract specific cycles, pass the cycle numbers in a list, e.g. cycle=[1,4,6] extracts cycles 1, 4, and 6.
- mask: ['high frequency', 'low frequency']; if only a high- or low-frequency cut-off is desired, use 'none' for the other, e.g. mask=[10**4,'none']
'''
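# Hedged usage sketch (the path, file names, and mask values below are illustrative only):
#   ex = EIS_exp(path='/data/eis/', data=['EIS_data1.mpt', 'EIS_data2.mpt'],
#                cycle='off', mask=['none', 'none'])
#   ex.EIS_plot()   # plot the experimental data, as described in the class docstring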
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]): #corrects cycle_number except for the first data file
    self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
#currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
self.df_raw = pd.concat(self.df_raw0, axis=0)
import requests
import pandas as pd
import numpy as np
import configparser
from datetime import timedelta, datetime
from dateutil import relativedelta, parser, rrule
from dateutil.rrule import WEEKLY
class whoop_login:
'''A class object to allow a user to login and store their authorization code,
then perform pulls using the code in order to access different types of data'''
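# Hedged usage sketch (the .ini file name is illustrative; the file must contain a
# [whoop] section with 'username' and 'password' keys, as read by get_authorization):
#   client = whoop_login()
#   client.get_authorization('whoop.ini')
#   daily = client.get_keydata_all()        # one row per day of membership
#   workouts = client.get_activities_all()  # one row per recorded activity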
def __init__(self, auth_code=None, whoop_id=None,current_datetime=datetime.utcnow()):
self.auth_code=auth_code
self.whoop_id=whoop_id
self.current_datetime=current_datetime
self.start_datetime=None
self.all_data=None
self.all_activities=None
self.sport_dict=None
self.all_sleep=None
self.all_sleep_events=None
def pull_api(self, url,df=False):
auth_code=self.auth_code
headers={'authorization':auth_code}
pull=requests.get(url,headers=headers)
if pull.status_code==200 and len(pull.content)>1:
if df:
d=pd.json_normalize(pull.json())
return d
else:
return pull.json()
else:
return "no response"
def pull_sleep_main(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
main_df=pd.json_normalize(sleep)
return main_df
def pull_sleep_events(self,sleep_id):
athlete_id=self.whoop_id
sleep=self.pull_api('https://api-7.whoop.com/users/{}/sleeps/{}'.format(athlete_id,sleep_id))
events_df=pd.json_normalize(sleep['events'])
events_df['id']=sleep_id
return events_df
def get_authorization(self,user_ini):
'''
Function to get the authorization token and user id.
This must be completed before a user can query the api
'''
config=configparser.ConfigParser()
config.read(user_ini)
username=config['whoop']['username']
password=config['whoop']['password']
headers={
"username": username,
"password": password,
"grant_type": "password",
"issueRefresh": False}
auth = requests.post("https://api-7.whoop.com/oauth/token", json=headers)
if auth.status_code==200:
content=auth.json()
user_id=content['user']['id']
token=content['access_token']
start_time=content['user']['profile']['createdAt']
self.whoop_id=user_id
self.auth_code='bearer ' + token
self.start_datetime=start_time
print("Authentication successful")
else:
print("Authentication failed - please double check your credentials")
def get_keydata_all(self):
'''
This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
if self.start_datetime:
if self.all_data is not None:
## All data already pulled
return self.all_data
else:
start_date=parser.isoparse(self.start_datetime).replace(tzinfo=None)
end_time='T23:59:59.999Z'
start_time='T00:00:00.000Z'
intervals=rrule.rrule(freq=WEEKLY,interval=1,until=self.current_datetime, dtstart=start_date)
date_range=[[d.strftime('%Y-%m-%d') + start_time,
(d+relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d') + end_time] for d in intervals]
all_data=pd.DataFrame()
for dates in date_range:
cycle_url='https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(self.whoop_id,
dates[1],
dates[0])
data=self.pull_api(cycle_url,df=True)
all_data=pd.concat([all_data,data])
all_data.reset_index(drop=True,inplace=True)
## fixing the day column so it's not a list
all_data['days']=all_data['days'].map(lambda d: d[0])
all_data.rename(columns={"days":'day'},inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols=['qualityDuration','needBreakdown.baseline','needBreakdown.debt','needBreakdown.naps',
'needBreakdown.strain','needBreakdown.total']
for sleep_col in sleep_cols:
all_data['sleep.' + sleep_col]=all_data['sleep.' + sleep_col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
## Making nap variable
all_data['nap_duration']=all_data['sleep.naps'].apply(lambda x: x[0]['qualityDuration']/60000 if len(x)==1 else(
sum([y['qualityDuration'] for y in x if y['qualityDuration'] is not None])/60000 if len(x)>1 else 0))
all_data.drop(['sleep.naps'],axis=1,inplace=True)
## dropping duplicates subsetting because of list columns
all_data.drop_duplicates(subset=['day','sleep.id'],inplace=True)
self.all_data=all_data
return all_data
else:
print("Please run the authorization function first")
def get_activities_all(self):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe'''
if self.sport_dict:
sport_dict=self.sport_dict
else:
sports=self.pull_api('https://api-7.whoop.com/sports')
sport_dict={sport['id']:sport['name'] for sport in sports}
self.sport_dict=sport_dict
if self.start_datetime:
## process activity data
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull all data to process activities
data=self.get_keydata_all()
## now process activities data
act_data=pd.json_normalize(data[data['strain.workouts'].apply(len)>0]['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper','during.lower']]=act_data[['during.upper','during.lower']].apply(pd.to_datetime)
act_data['total_minutes']=act_data.apply(lambda x: (x['during.upper']-x['during.lower']).total_seconds()/60.0,axis=1)
for z in range(0,6):
act_data['zone{}_minutes'.format(z+1)]=act_data['zones'].apply(lambda x: x[z]/60000.)
act_data['sport_name']=act_data.sportId.apply(lambda x: sport_dict[x])
act_data['day']=act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones','during.bounds'],axis=1,inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities=act_data
return act_data
else:
print("Please run the authorization function first")
def get_sleep_all(self):
'''
This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents one night of sleep
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull timeframe data
data=self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep is not None:
## All sleep data already pulled
return self.all_sleep
else:
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if pd.isna(x)==False]
all_sleep=pd.DataFrame()
for s in sleep_list:
m=self.pull_sleep_main(s)
all_sleep=pd.concat([all_sleep,m])
## Cleaning sleep data
sleep_update=['qualityDuration','latency','debtPre','debtPost','needFromStrain','sleepNeed',
'habitualSleepNeed','timeInBed','lightSleepDuration','slowWaveSleepDuration',
'remSleepDuration','wakeDuration','arousalTime','noDataDuration','creditFromNaps',
'projectedSleep']
for col in sleep_update:
all_sleep[col]=all_sleep[col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
all_sleep.drop(['during.bounds'],axis=1,inplace=True)
self.all_sleep=all_sleep.copy(deep=True)
all_sleep.drop(['events'],axis=1,inplace=True)
return all_sleep
else:
print("Please run the authorization function first")
def get_sleep_events_all(self):
'''
This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.
Each row in the data frame represents an individual sleep event within an individual night of sleep.
Sleep events can be joined against the sleep or main datasets by sleep id.
All sleep times are returned in minutes.
'''
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
else:
## pull timeframe data
data=self.get_keydata_all()
## getting all the sleep ids
if self.all_sleep_events is not None:
## All sleep data already pulled
return self.all_sleep_events
else:
if self.all_sleep is not None:
sleep_events=self.all_sleep[['activityId','events']]
all_sleep_events=pd.concat([pd.concat([pd.json_normalize(events),
pd.DataFrame({'id':len(events)*[sleep]})],axis=1) for events, sleep in zip(sleep_events['events'],sleep_events['activityId'])])
else:
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if pd.isna(x)==False]
all_sleep_events=pd.DataFrame()
for s in sleep_list:
events=self.pull_sleep_events(s)
all_sleep_events=pd.concat([all_sleep_events,events])
## Cleaning sleep events data
all_sleep_events['during.lower']=pd.to_datetime(all_sleep_events['during.lower'])
all_sleep_events['during.upper']=pd.to_datetime(all_sleep_events['during.upper'])
all_sleep_events.drop(['during.bounds'],axis=1,inplace=True)
all_sleep_events['total_minutes']=all_sleep_events.apply(lambda x: (x['during.upper']-x['during.lower']).total_seconds()/60.0,axis=1)
self.all_sleep_events=all_sleep_events
return all_sleep_events
else:
print("Please run the authorization function first")
def get_hr_all(self,df=False):
'''
This function will pull every heart rate measurement recorded for the life of WHOOP membership.
The default return for this function is a list of lists, where each "row" contains the date, time, and hr value.
The measurements are spaced out every ~6 seconds on average.
To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.
NOTE: This api pull takes about 6 seconds per week of data, or about 1 minute for 10 weeks of data,
so be careful when you pull; it may take a while.
'''
if self.start_datetime:
athlete_id=self.whoop_id
start_date=parser.isoparse(self.start_datetime).replace(tzinfo=None)
end_time='T23:59:59.999Z'
start_time='T00:00:00.000Z'
intervals=rrule.rrule(freq=WEEKLY,interval=1,until=self.current_datetime, dtstart=start_date)
date_range=[[d.strftime('%Y-%m-%d') + start_time,
(d+relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d') + end_time] for d in intervals]
hr_list=[]
for dates in date_range:
start=dates[0]
end=dates[1]
ul='''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(athlete_id,end,start)
hr_vals=self.pull_api(ul)['values']
hr_values=[[datetime.utcfromtimestamp(h['time']/1e3).date(),
datetime.utcfromtimestamp(h['time']/1e3).time(),
h['data']] for h in hr_vals]
hr_list.extend(hr_values)
if df:
hr_df=pd.DataFrame(hr_list)
hr_df.columns=['date','time','hr']
return hr_df
else:
return hr_list
else:
print("Please run the authorization function first")
def get_keydata_timeframe(self,start,end=datetime.strftime(datetime.utcnow(),"%Y-%m-%d")):
'''
This function returns a dataframe of WHOOP metrics for each day in a specified time period.
To use this function, provide a start and end date in string format as follows "YYYY-MM-DD".
If no end date is specified, it will default to today's date.
In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information
'''
st=datetime.strptime(start,'%Y-%m-%d')
e=datetime.strptime(end,'%Y-%m-%d')
if st>e:
if e>datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print("Please enter a start date that is earlier than your end date")
else:
if self.auth_code:
end_time='T23:59:59.999Z'
start_time='T00:00:00.000Z'
intervals=rrule.rrule(freq=WEEKLY,interval=1,until=e, dtstart=st)
date_range=[[d.strftime('%Y-%m-%d') + start_time,
(d+relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d') + end_time] for d in intervals if d<=e]
time_data=pd.DataFrame()
for dates in date_range:
cycle_url='https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(self.whoop_id,
dates[1],
dates[0])
data=self.pull_api(cycle_url,df=True)
time_data=pd.concat([time_data,data])
time_data.reset_index(drop=True,inplace=True)
## fixing the day column so it's not a list
time_data['days']=time_data['days'].map(lambda d: d[0])
time_data.rename(columns={"days":'day'},inplace=True)
## Putting all time into minutes instead of milliseconds
sleep_cols=['qualityDuration','needBreakdown.baseline','needBreakdown.debt','needBreakdown.naps',
'needBreakdown.strain','needBreakdown.total']
for sleep_col in sleep_cols:
time_data['sleep.' + sleep_col]=time_data['sleep.' + sleep_col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
## Making nap variable
time_data['nap_duration']=time_data['sleep.naps'].apply(lambda x: x[0]['qualityDuration']/60000 if len(x)==1 else(
sum([y['qualityDuration'] for y in x if y['qualityDuration'] is not None])/60000 if len(x)>1 else 0))
time_data.drop(['sleep.naps'],axis=1,inplace=True)
## removing duplicates
time_data.drop_duplicates(subset=['day','sleep.id'],inplace=True)
return time_data
else:
print("Please run the authorization function first")
def get_activities_timeframe(self,start,end=datetime.strftime(datetime.utcnow(),"%Y-%m-%d")):
'''
Activity data is pulled through the get_keydata functions so if the data pull is present, this function
just transforms the activity column into a dataframe of activities, where each activity is a row.
If it has not been pulled, this function runs the key data function then returns the activity dataframe
If no end date is specified, it will default to today's date.
'''
st=datetime.strptime(start,'%Y-%m-%d')
e=datetime.strptime(end,'%Y-%m-%d')
if st>e:
if e>datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print("Please enter a start date that is earlier than your end date")
else:
if self.auth_code:
if self.sport_dict:
sport_dict=self.sport_dict
else:
sports=self.pull_api('https://api-7.whoop.com/sports')
sport_dict={sport['id']:sport['name'] for sport in sports}
self.sport_dict=sport_dict
## process activity data
if self.all_data is not None:
## use existing
data=self.all_data
data=data[(data.day>=start)&(data.day<=end)].copy(deep=True)
else:
## pull timeframe data
data=self.get_keydata_timeframe(start,end)
## now process activities data
act_data=pd.json_normalize(data[data['strain.workouts'].apply(len)>0]['strain.workouts'].apply(lambda x: x[0]))
act_data[['during.upper','during.lower']]=act_data[['during.upper','during.lower']].apply(pd.to_datetime)
act_data['total_minutes']=act_data.apply(lambda x: (x['during.upper']-x['during.lower']).total_seconds()/60.0,axis=1)
for z in range(0,6):
act_data['zone{}_minutes'.format(z+1)]=act_data['zones'].apply(lambda x: x[z]/60000.)
act_data['sport_name']=act_data.sportId.apply(lambda x: sport_dict[x])
act_data['day']=act_data['during.lower'].dt.strftime('%Y-%m-%d')
act_data.drop(['zones','during.bounds'],axis=1,inplace=True)
act_data.drop_duplicates(inplace=True)
self.all_activities=act_data
return act_data
else:
print("Please run the authorization function first")
def get_sleep_timeframe(self,start,end=datetime.strftime(datetime.utcnow(),"%Y-%m-%d")):
'''
This function returns sleep metrics in a data frame, for timeframe specified by the user.
Each row in the data frame represents one night of sleep.
If no end date is specified, it will default to today's date.
All sleep times are returned in minutes.
'''
st=datetime.strptime(start,'%Y-%m-%d')
e=datetime.strptime(end,'%Y-%m-%d')
if st>e:
if e>datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print("Please enter a start date that is earlier than your end date")
else:
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
data=data[(data.day>=start)&(data.day<=end)].copy(deep=True)
else:
## pull timeframe data
data=self.get_keydata_timeframe(start,end)
## getting all the sleep ids
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if pd.isna(x)==False]
if self.all_sleep is not None:
## All sleep data already pulled so just filter
all_sleep=self.all_sleep
time_sleep=all_sleep[all_sleep.activityId.isin(sleep_list)]
return time_sleep
else:
time_sleep=pd.DataFrame()
for s in sleep_list:
m=self.pull_sleep_main(s)
time_sleep=pd.concat([time_sleep,m])
## Cleaning sleep data
sleep_update=['qualityDuration','latency','debtPre','debtPost','needFromStrain','sleepNeed',
'habitualSleepNeed','timeInBed','lightSleepDuration','slowWaveSleepDuration',
'remSleepDuration','wakeDuration','arousalTime','noDataDuration','creditFromNaps',
'projectedSleep']
for col in sleep_update:
time_sleep[col]=time_sleep[col].astype(float).apply(lambda x: np.nan if np.isnan(x) else x/60000)
time_sleep.drop(['during.bounds','events'],axis=1,inplace=True)
return time_sleep
else:
print("Please run the authorization function first")
def get_sleep_events_timeframe(self,start,end=datetime.strftime(datetime.utcnow(),"%Y-%m-%d")):
'''
This function returns sleep events in a data frame, for the time frame specified by the user.
Each row in the data frame represents an individual sleep event within an individual night of sleep.
Sleep events can be joined against the sleep or main datasets by sleep id.
If no end date is specified, it will default to today's date.
'''
st=datetime.strptime(start,'%Y-%m-%d')
e=datetime.strptime(end,'%Y-%m-%d')
if st>e:
if e>datetime.today():
print("Please enter an end date earlier than tomorrow")
else:
print("Please enter a start date that is earlier than your end date")
else:
if self.auth_code:
if self.all_data is not None:
## use existing
data=self.all_data
data=data[(data.day>=start)&(data.day<=end)].copy(deep=True)
else:
## pull timeframe data
data=self.get_keydata_timeframe(start,end)
## getting all the sleep ids
sleep_ids=data['sleep.id'].values.tolist()
sleep_list=[int(x) for x in sleep_ids if pd.isna(x)==False]
if self.all_sleep_events is not None:
## All sleep data already pulled so just filter
all_sleep_events=self.all_sleep_events
time_sleep_events=all_sleep_events[all_sleep_events.id.isin(sleep_list)]
return time_sleep_events
else:
if self.all_sleep is not None:
sleep_events=self.all_sleep[['activityId','events']]
time_sleep=sleep_events[sleep_events.activityId.isin(sleep_list)]
time_sleep_events=pd.concat([pd.concat([pd.json_normalize(events),
pd.DataFrame({'id':len(events)*[sleep]})],axis=1) for events, sleep in zip(time_sleep['events'],time_sleep['activityId'])])
else:
time_sleep_events= | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
"""
Testing a 1D two-state (unsupervised) GMM classifier
The motivation for this simple scheme was to see how well the EMG RMS power
could predict Wake/Sleep states, assuming REM is folded into Sleep.
This 1D two-state GMM scheme is applied (independently) to each feature in the
incoming std
"""
import json
import argparse
import os
import pdb
import pandas as pd
import numpy as np
import tsm1d
import remtools as rt
import scoreblock as sb
from tracedumper import TraceDumper
def two_state_prediction(std=None, pdiff=0.95, scoremap_hum={}, scoremap_gmm={}):
"""
- build a two state (1D) GMM classifier for each feature
- predict scores
- map human predictions to two states
- build scoreblock of human and model scores
input
------
returns
------
examples:
scoremap_gmm = {-1:'Switch', 0:'Sleep', 1:'Wake'}
scoremap_hum = {'Non REM':'Sleep', 'REM':'Sleep'}
"""
features = std.features
scoreblock = std.scoreblock
tagDict = std.tagDict
X = features.data
# for each feature, build GMM and predict scores
ndx, data = [], []
for i, row in features.df_index.iterrows():
print(tagDict, row['tag'])
# print(features.df_index)
# GMM model scores
myGMM = tsm1d.TwoStateGMMClassifier.from_data(X[i])
data.append(myGMM.predict(X[i], pdiff=pdiff))
# indexing
dd = {k:v for k,v in tagDict.items()}
dd.update(row)
dd['scoreType'] = 'model'
dd['classifier'] = 'TwoStateGMM'
dd['pdiff'] = pdiff
dd['scoreTag'] = dd['tag']
ndx.append(dd)
# make a scoreblock
df_index = pd.DataFrame(data=ndx)
df_data = pd.DataFrame(data, columns=scoreblock.data_cols)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 17:28:54 2018
@author: galengao
This is the original analysis code as it exists in the environment where it was written and initially run.
Portions and modifications of this script constitute all other .py scripts in this directory.
"""
import numpy as np
import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
### Helper Function to Load in the Data ###
def load_data(coh, thresh=False):
"""Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs
for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)
one level up from this script."""
if thresh:
hg38 = '../hg38_gistic/'+coh+'/all_thresholded.by_genes.txt'
hg19 = '../hg19_gistic/'+coh+'/all_thresholded.by_genes.txt'
hg38drops = ['Cytoband', 'Locus ID']
else:
hg38 = '../hg38_gistic/'+coh+'/all_data_by_genes.txt'
hg19 = '../hg19_gistic/'+coh+'/all_data_by_genes.txt'
hg38drops = ['Cytoband', 'Gene ID']
df_hg19 = pd.read_table(hg19, index_col=[0]).drop(['Cytoband', 'Locus ID'], axis=1)
df_hg38 = pd.read_table(hg38, index_col=[0]).drop(hg38drops, axis=1)
same_samps = list(set(df_hg38.columns) & set(df_hg19.columns))
same_genes = list(set(df_hg38.index) & set(df_hg19.index))
print(coh, len(same_genes), len(same_samps))
return df_hg38[same_samps].T[same_genes], df_hg19[same_samps].T[same_genes]
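# Hedged usage sketch ('BRCA' is an illustrative cohort name; any tumor-type directory
# present under both ../hg38_gistic/ and ../hg19_gistic/ should work):
#   df_hg38, df_hg19 = load_data('BRCA', thresh=True)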
### Raw Copy Number Values Analysis Code ###
def raw_value_comparison(coh, plot=False):
"""Return the average differences in raw copy number values between the
gene-level calls in hg19 and hg38 for each gene for a given tumor type
'coh.' If plot=True, plot the genes' differences in a histogram."""
# load in the data
df_38, df_19 = load_data(coh, thresh=False)
# compute average sample-by-sample differences for each gene
df_s = df_38 - df_19
avg_diff = {g:np.average(df_s[g]) for g in df_s.columns.get_level_values('Gene Symbol')}
# take note of which genes are altered more than our threshold of 4*std
results = []
std = np.std([avg_diff[x] for x in avg_diff])
for g in avg_diff:
if avg_diff[g] > 4 * std:
results.append([coh, 'Pos', g, avg_diff[g]])
elif avg_diff[g] < -4 * std:
results.append([coh, 'Neg', g, avg_diff[g]])
if plot:
plt.hist([avg_diff[x] for x in avg_diff], bins=1000)
plt.title(coh, fontsize=16)
plt.xlabel('Average CN Difference Between Alignments', fontsize=14)
plt.ylabel('Genes', fontsize=14)
sns.despine()
plt.savefig('./genehists/'+coh+'_genehist.pdf')
plt.savefig('./genehists/'+coh+'_genehist.png')
plt.clf()
return results
def sequential_cohort_test_raw_values(cohs, plot=False):
"""Sequentially compare raw gene-level calls for the given tumor types."""
c_results = []
for coh in cohs: # perform raw value comparison for each cohort
c_results += raw_value_comparison(coh, plot=plot)
# compile results together
df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference'])
# authors: <NAME>
# date: 2020-03-02
"""
The pypuck functions are used as wrapper functions to call the NHL.com
publicly available API's.
"""
import requests
import pandas as pd
import altair as alt
from pypuck.helpers import helpers
def player_stats(start_date=None, end_date=None):
"""
Query the top 100 player's stats (sorted by total points)
from the players summary report endpoint on the NHL.com API.
The stats are queried on an aggregated game-by-game basis
for a range of dates. If no date is specified the function will return
the players stats for the current season. The stats to be
returned are restricted to the regular season.
The valid dates range from the start of the 1917 season until the
current day.
The function will return the current season's stats if the arguments
are blank (i.e. left as None).
You can find the glossary pertaining to the returned
columns by going to http://www.nhl.com/stats/glossary.
Parameters
----------
start_date : str (default None).
The stat start date string in 'YYYY-MM-DD' format.
end_date : str (default None)
The stat end date string in 'YYYY-MM-DD' format.
Returns
-------
pandas.core.DataFrame
The player's stats in a dataframe sorted by total points.
Examples
--------
>>> from pypuck import pypuck
>>> pypuck.player_stats(start_date='2019-10-02', end_date='2020-02-28')
assists | evGoals | evPoints | faceoffWinPct | ...
--------------------------------------------------
67 | 27 | 66 | 0.51641 | ...
--------------------------------------------------
...
"""
# Set dates to current season if none
start_date = '2019-10-02' if start_date is None else start_date
end_date = '2020-04-11' if end_date is None else end_date
# Check that the arguments are of the correct type,
# in the correct format, and in the correct order
helpers.check_argument_type(start_date, 'start_date', str)
helpers.check_argument_type(end_date, 'end_date', str)
helpers.check_date_format(start_date)
helpers.check_date_format(end_date)
helpers.check_date(start_date, end_date)
# Specify the URL
url = 'https://api.nhle.com/stats/rest/en/skater/summary?' +\
'isAggregate=true&' +\
'isGame=true&' +\
'sort=[{"property":"points","direction":"DESC"},' +\
'{"property":"goals","direction":"DESC"},' +\
'{"property":"assists","direction":"DESC"}]&' +\
'start=0&' +\
'limit=100&' +\
'factCayenneExp=gamesPlayed>=1&' +\
f'cayenneExp=gameDate<="{end_date}" and ' +\
f'gameDate>="{start_date}" and gameTypeId=2'
# Make the API request
page = requests.get(url)
# Check the response code is valid - i.e. the API didn't fail
helpers.check_response_code(page.status_code)
# Return the top 100 players dataframe
return pd.DataFrame(page.json()['data'])
def attendance(regular=True, playoffs=True,
start_season=None, end_season=None):
"""
Query the NHL attendance number from 1975 to 2019 from the NHL records API.
The attendance represents annual attendance numbers for all teams.
The user can specify to return either the regular season attendance,
playoff attendance numbers, or both.
The function will display a chart showing the attendance over the
specified time period.
Parameters
----------
regular : boolean (default True).
Whether to query seasonal regular season attendance data.
playoffs : boolean (default True)
Whether to query seasonal playoff attendance data.
start_season : int (default None)
The start season is an integer ranging from 1975 to 2018.
end_season : int (default None)
The end season is an integer ranging from 1976 to 2019.
Returns
-------
altair.vegalite.v3.api.Chart
It wil display attendance numbers in an Altair chart.
Examples
--------
>>> from pypuck import pypuck
>>> pypuck.attendance(regular=True, playoffs=True,
start_season=2000, end_season=2019)
...
"""
# Specify the URL
url = 'https://records.nhl.com/site/api/attendance'
# Make the API request
page = requests.get(url)
# Check the response code is valid - i.e. the API didn't fail
helpers.check_response_code(page.status_code)
df = pd.DataFrame(page.json()['data']).sort_values(by=['seasonId'])
df = df.fillna(0)
df.playoffAttendance = df.playoffAttendance.astype(int)
df.regularAttendance = df.regularAttendance.astype(int)
df = df.rename(columns={'regularAttendance': 'regular',
'playoffAttendance': 'playoff'})
# set start season and end season to default value if none
if pd.isnull(start_season):
start_season = 1975
if pd.isnull(end_season):
end_season = 2019
# check if a proper input is given
helpers.check_argument_type(regular, 'regular', bool)
helpers.check_argument_type(playoffs, 'playoffs', bool)
if start_season not in range(1975, 2019):
raise Exception('Start season is out of range')
if end_season not in range(1976, 2020):
raise Exception('End season is out of range')
if end_season <= start_season:
raise Exception('End season should be not be '
'earlier than the start season')
start_season = int(str(start_season) + str(start_season))
end_season = int(str(end_season) + str(end_season))
df = df.query('seasonId >= @start_season and seasonId <= @end_season')
if regular is True and playoffs is True:
# plot both regular attendance and playoff attendance
plot1 = alt.Chart(df, title="Regular Attendance").mark_bar().encode(
alt.X('seasonId:N', title="Season"),
alt.Y('regular:Q', title='Regular Attendance'))
plot2 = alt.Chart(df, title="Playoff Attendance").mark_bar().encode(
alt.X('seasonId:N', title="Season"),
alt.Y('playoff:Q', title='Playoff Attendance'))
plot = (plot1 | plot2)
elif regular is True:
# plot regular attendance if it is requested only
plot = alt.Chart(df, title="Regular Attendance").mark_bar().encode(
alt.X('seasonId:N', title="Season"),
alt.Y('regular:Q', title='Regular Attendance'))
elif playoffs is True:
# plot playoff attendance if it is requested only
plot = alt.Chart(df, title="Playoff Attendance").mark_bar().encode(
alt.X('seasonId:N', title="Season"),
alt.Y('playoff:Q', title='Playoff Attendance'))
else:
raise Exception('Must select at least one attendance type')
return plot
def team_stats(start_season="20192020", end_season="20192020"):
"""
Get team season stats specified by start year or start year and end year.
If no year is specified then the year 2019-2020 is default.
If an end year is specified then the start year is also to be provided.
year is to be provided in a 2 year format of YYYYYYYY.
The valid seasons range from the 1917 season until the
current season.
You can find the glossary pertaining to the returned
columns by going to http://www.nhl.com/stats/glossary.
Parameters
----------
start_season : str
The stat start year string in 'YYYYYYYY' format.
end_season : str
The stat end year string in 'YYYYYYYY' format.
Returns
-------
pandas.core.DataFrame
The team's seasonal stats in a dataframe.
Examples
--------
>>> from pypuck import pypuck
>>> start_season = '19801981'
>>> end_season = '19891990'
>>> pypuck.team_stats(start_season=start_season, end_season=end_season)
faceoffWinPct | gamesPlayed | goalsAgainst | goalsAgainstPerGame | ...
-----------------------------------------------------------------------
0.481361 | 82 | 251 | 3.06097 | ...
-----------------------------------------------------------------------
...
"""
# Check that the arguments are of the correct type (i.e. str)
helpers.check_argument_type(start_season, 'start_season', str)
helpers.check_argument_type(end_season, 'end_season', str)
helpers.check_season_format(start_season)
helpers.check_season_format(end_season)
helpers.check_seasons(start_season, end_season)
base_url = 'https://api.nhle.com/stats/rest/en/team/summary?'
arguments = 'cayenneExp=gameTypeId=2' +\
f' and seasonId<={end_season}' +\
f' and seasonId>={start_season}'
# Make the api request
page = requests.get(base_url + arguments)
# Check the response code is valid - i.e. the API didn't fail
helpers.check_response_code(page.status_code)
df = pd.DataFrame(page.json()['data'])
return df
def draft_pick(pick_number=1, round_number=None, year=None):
"""
The function returns information about draft picks for the specified
parameters and stores them in a pandas data frame.
If year is not specified, then all of the draft picks
for all year will be returned.
If no round is specified the data frame will include all players
with chosen pick number from every round.
Even when valid parameters are entered, the requested pick number
may not exist in the specified round; in that case the output would be
empty and an assert error is raised.
Parameters
----------
pick_number : int (default 1).
Desired pick number, must be in the range [1,38].
If nothing is specified, picks first draft in all rounds.
round_number : int (default None).
Desired round number, must be in the range [1,25]
year : int (default None).
Year in which a draft took place. Must be in YYYY format,
with a year in the range [1963, 2019].
Returns
-------
pandas.core.DataFrame
Drafts with specified parameters.
Examples
--------
>>> from pypuck import pypuck
>>> pick_number = 9
>>> round_number = 7
>>> year = 2000
>>> pypuck.draft_pick(pick_number = pick_number,
round_number=round_number, year=year)
Player | Round_num | Pick_num | Tri_code | Year | ...
------------------------------------------------
<NAME> | 7 | 9 | LAK | 2000 | ...
------------------------------------------------
"""
# Check that the arguments are of the correct type (i.e. int) and value
helpers.check_argument_type(pick_number, 'pick_number', int)
assert pick_number in range(1, 38), (
    'Pick number is out of the available range')
if round_number is not None:
helpers.check_argument_type(round_number, 'round_number', int)
assert round_number in range(1, 25), (
    'Round number is out of the available range')
if year is not None:
helpers.check_argument_type(year, 'year', int)
assert year in range(1963, 2019), 'Year is out of the available range'
api = requests.get("https://records.nhl.com/site/api/draft").json()
stats = pd.DataFrame(api['data'])
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import seaborn as sns
from gensim.models.ldamulticore import LdaMulticore
#load the files
data_files = ["data/pubmed_articles_cancer_01_smaller.csv", "data/pubmed_articles_cancer_02_smaller.csv",
"data/pubmed_articles_cancer_03_smaller.csv","data/pubmed_articles_cancer_04_smaller.csv"]
input_data = pd.DataFrame()
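# Hedged continuation: an assumed completion of the truncated snippet above -- read each
# chunk listed in data_files and stack them into input_data. Read options and column
# handling are guesses; adjust them to the actual CSV layout.
for data_file in data_files:
    chunk = pd.read_csv(data_file)
    input_data = pd.concat([input_data, chunk], ignore_index=True)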
"""
Calculate MQA scores only for the resolved region from local score.
MQA methods:
- DeepAccNet
- P3CMQA
- ProQ3D
- VoroCNN
"""
import argparse
import os
import subprocess
import tarfile
from pathlib import Path
from typing import Any, List, Union
import numpy as np
import pandas as pd
from prody import parsePDB, writePDB
from tqdm import tqdm
data_dir = Path('../../data')
interim_path = data_dir / 'interim'
score_dir = data_dir / 'out/dataset/score/mqa'
def open_tar(tar_file: Union[str, Path]) -> tarfile.TarFile:
return tarfile.open(tar_file, 'r:gz')
def get_resolved_pdb(target: str, resolved_indices: List[int]) -> Path:
target_pdb_dir = data_dir / 'out/dataset/alphafold_output' / target
pdb_resolved_dir = data_dir / 'out/dataset/pdb/pdb_resolved'
pdb_resolved_target_dir = pdb_resolved_dir / target
pdb_resolved_target_dir.mkdir(parents=True, exist_ok=True)
for pdb in target_pdb_dir.glob('*.pdb'):
pdb_name = pdb.stem
output_pdb_path = pdb_resolved_target_dir / f'{pdb_name}.pdb'
if output_pdb_path.exists():
continue
mol = parsePDB(pdb)
resindices = mol.getResnums() - 1
resolved_atom_indices = np.where(np.isin(resindices, resolved_indices))[0]
mol_resolved = mol[resolved_atom_indices]
writePDB(str(output_pdb_path), mol_resolved)
return pdb_resolved_target_dir
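# Hedged usage sketch (the target name, sequence length, and missing residues below are
# illustrative; real values come from the dataset CSV and missing_residues.npy):
#   resolved = np.setdiff1d(np.arange(408), np.array([0, 1, 2, 405, 406, 407]))
#   pdb_dir = get_resolved_pdb('T1024', resolved)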
class CalcResolvedConfidence:
missing_dict = np.load(interim_path / 'missing_residues.npy', allow_pickle=True).item()
def __init__(self, method: str, target_csv: Union[str, Path]):
self.method = method
self.target_df = pd.read_csv(target_csv, index_col=0)
def __call__(self, *args: Any, **kwds: Any) -> Any:
results = []
with tqdm(self.target_df.iterrows(), total=len(self.target_df)) as pbar:
for _, row in pbar:
target = row['id']
pbar.set_description(f'Target = {target}')
length = row['length']
result = self.for_target(target, length)
results.append(result)
if sum([1 if result is None else 0 for result in results]) > 0:
print(f'{self.method} calculation not yet finished')
exit()
return pd.concat(results)
def for_target(self, target: str, length: int) -> Union[pd.DataFrame, None]:
resolved_indices = self.get_resolved_indices(target, length)
if self.method == 'DeepAccNet' or self.method == 'DeepAccNet-Bert':
result = self.DeepAccNet(target, length)
elif self.method == 'P3CMQA' or self.method == 'Sato-3DCNN':
result = self.P3CMQA(target, resolved_indices)
elif self.method == 'ProQ3D':
result = self.ProQ3D(target, resolved_indices)
elif self.method == 'VoroCNN':
result = self.VoroCNN(target, resolved_indices)
elif self.method == 'DOPE':
result = self.DOPE(target, resolved_indices)
elif self.method == 'SBROD':
result = self.SBROD(target, resolved_indices)
else:
raise ValueError(f'Unknown method: {self.method}')
return result
@classmethod
def get_resolved_indices(cls, target: str, length: int) -> List[int]:
return np.setdiff1d(np.arange(length), cls.missing_dict[target])
def DeepAccNet(self, target: str, length: int) -> Union[pd.DataFrame, None]:
deepaccnet_path = score_dir / 'DeepAccNet'
result_path = deepaccnet_path / f'{target}_resolved.csv'
# if calculation already finished
if result_path.exists():
result_df = pd.read_csv(result_path, index_col=0)
return result_df
# if calculation not yet finished
os.chdir('DeepAccNet')
cmd = ['qsub', '-g', 'tga-ishidalab', './get_score_resolved.sh', target, str(length)]
subprocess.run(cmd)
os.chdir('..')
return None
def P3CMQA(self, target: str, resolved_indices: List[int]) -> pd.DataFrame:
p3cmqa_path = score_dir / self.method
tar_path = p3cmqa_path / f'{target}.tar.gz'
tar = open_tar(tar_path)
results = []
for tarinfo in tar:
if tarinfo.name.endswith('.csv'):
if Path(tarinfo.name).stem == target:
continue
f = tar.extractfile(tarinfo.name)
local_df = pd.read_csv(f, index_col=0)
"""
This script contains experiment setups for the results in figure 1.
"""
import os
import pandas as pd
from experiment_Setup import Experiment_Setup
from agent_env import get_pi_env
from SVRG import *
if __name__ == '__main__':
NUM_RUNS = 10
# Random MDP
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1e-3, "sigma_omega": 1e-3,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":50, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1e-3, 'sigma_omega': 1e-3,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 50, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.1, "batch_svrg_increment_ratio": 1.05},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=50, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="rmdp", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="zero", num_data=5000)
results.extend(pi_env.run_policy_iteration())
pd.DataFrame(results).to_pickle('./rmdp_results.pkl')
# Mountain Car
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1e-1, "sigma_omega": 1e-1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":20, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1e-1, 'sigma_omega': 1e-1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 20, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.2, "batch_svrg_increment_ratio": 1.1},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=20, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="mc", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="zero", num_data=5000)
results.extend(pi_env.run_policy_iteration())
pd.DataFrame(results).to_pickle('./mc_results.pkl')
# Cart Pole
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1, "sigma_omega": 1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":50, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1, 'sigma_omega': 1,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 50, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.1, "batch_svrg_increment_ratio": 1.05},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=50, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="cp", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="zero", num_data=5000)
results.extend(pi_env.run_policy_iteration())
pd.DataFrame(results).to_pickle('./cp_results.pkl')
# Acrobot
alg_settings = [
{"method": svrg_classic, "name": "svrg", "sigma_theta": 1e-2, "sigma_omega": 1e-2,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch":True,
"num_epoch":50, "num_checks":10, "inner_loop_multiplier":1
},
{"method": batch_svrg, "name": 'batch_svrg', "sigma_theta": 1e-2, 'sigma_omega': 1e-2,
"grid_search": False, "record_per_dataset_pass": False, "record_per_epoch": True,
"num_epoch": 50, "num_checks": 10, "inner_loop_multiplier": 1,
"batch_svrg_init_ratio": 0.1, "batch_svrg_increment_ratio": 1.05},
]
results = []
for i in range(NUM_RUNS):
exp_setup = Experiment_Setup(num_epoch=50, exp_settings=alg_settings, saving_dir_path="./",
multi_process_exps=False, use_gpu=False, num_processes=1,
batch_size=100, num_workers=0)
pi_env = get_pi_env(env_type="ab", exp_setup=exp_setup, loading_path="", is_loading=False, saving_path="./", is_saving=True,
policy_iteration_episode=1, init_method="random", num_data=5000)
results.extend(pi_env.run_policy_iteration())
pd.DataFrame(results)
# -*- coding: utf-8 -*-
"""
Created on Sun April 7 18:51:20 2020
@author: omars
"""
# %% Libraries
from mdp_utils import fit_cv_fold
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import binascii
from sklearn.model_selection import GroupKFold
from tqdm import tqdm
import os
import multiprocessing as mp
from functools import partial
import warnings
warnings.filterwarnings('ignore')
# %% Functions for Initialization
# createSamples() takes the original dataframe from combined data,
# the names of columns of features to keep, the treshold values to determine
# what type of action is made based on the FIRST feature of features_list,
# days_avg the number of days used to compress datapoints, and returns a data frame with
# desired features and history, ratio values and history, and 'RISK' and 'ACTION'
# returns new dataframe with only the desired columns, number of features considered
def createSamples(df, # , # dataframe: original full dataframe
# new_cols, # str list: names of columns to be considered
target_colname, # str: col name of target_colname (i.e. 'deaths')
region_colname, # str, col name of region (i.e. 'state')
date_colname, # str, col name of time (i.e. 'date')
features_list, # list of str: i.e. (['mobility', 'testing'])
action_thresh_base, # int list: defining size of jumps in stringency
# d_delay, # int: day lag before calculating death impact
days_avg, # int: # of days to average when reporting death
region_exceptions=None):
actions = [0]
df.sort_values(by=[region_colname, date_colname], inplace=True)
df.rename(columns={date_colname: 'TIME'}, inplace=True)
# remove exceptions
if not (region_exceptions is None):
df = df[~(df[region_colname].isin(region_exceptions))]
action_thresh, no_action_id = action_thresh_base
# new_cols = ['state', 'date', 'cases', 'mobility_score']
if target_colname not in features_list:
new_cols = [region_colname] + ['TIME'] + [target_colname] + features_list
else:
new_cols = [region_colname] + ['TIME'] + features_list
df_new = df[new_cols]
# df_new.rename(columns = {df_new.columns[1]: 'TIME'}, inplace = True)
ids = df_new.groupby([region_colname]).ngroup()
df_new.insert(0, 'ID', ids, True)
# print(df.columns)
df_new.loc[:, ['TIME']] = pd.to_datetime(df_new['TIME'])
dfs = []
for region_name, group_region in df_new.groupby(region_colname):
first_date = group_region.TIME.min()
last_date = group_region.TIME.max()
date_index = pd.date_range(first_date, last_date, freq='1D')
date_index.name = 'TIME'
group_ = pd.DataFrame(index=date_index)
group_ = group_.join(group_region.set_index('TIME'))
if group_.shape[0] != group_region.shape[0]:
print('Missing dates: {} {} - {} missing rows'.format(region_colname,
region_name,
group_.shape[0] - group_region.shape[0]))
last_missing_date = group_[group_['ID'].isnull()].tail(1).index[0]
print('last missing date: {}'.format(str(last_missing_date)))
group_ = group_[group_.index > last_missing_date].copy()
dfs.append(group_)
df_new = pd.concat(dfs)
# print(df_new)
# calculating stringency based on sum of actions
# df['StringencyIndex'] = df.iloc[:, 3:].sum(axis=1)
# add a column for action, categorizing by change in stringency index
# df['StringencyChange'] = df['StringencyIndex'].shift(-1) - df['StringencyIndex']
# df.loc[df['ID'] != df['ID'].shift(-1), 'StringencyChange'] = 0
# df.loc[df['StringencyIndex'] == '', 'StringencyChange'] = 0
# print(df.loc[df['ID']=='California'])
# resample data according to # of days
g = df_new.groupby(['ID'])
cols = df_new.columns
# print('cols', cols)
dictio = {i: 'last' for i in cols}
for key in set([target_colname]+features_list):
dictio[key] = 'mean'
# dictio['StringencyChange'] = 'sum'
# del dictio['TIME']
df_new = g.resample('%sD' % days_avg).agg(dictio)
# df_new = g.resample('3D').mean()
# print('new', df_new)
df_new = df_new.drop(columns=['ID'])
df_new = df_new.reset_index()
# creating feature lag 1, feature lag 2 etc.
df_new.sort_values(by=['ID', 'TIME'], inplace=True)
for f in features_list:
df_new[f+'-1'] = df_new.groupby('ID')[f].shift(1)
df_new[f+'-2'] = df_new.groupby('ID')[f].shift(2)
# deleting target == 0
df_new = df_new.loc[df_new[target_colname] != 0, :]
# creating r_t, r_t-1, etc ratio values from cases
df_new['r_t'] = df_new.groupby('ID')[target_colname].pct_change(1) + 1
df_new['r_t-1'] = df_new.groupby('ID')['r_t'].shift(1)
df_new['r_t-2'] = df_new.groupby('ID')['r_t'].shift(2)
new_features = features_list + [f+'-1' for f in features_list] + \
[f+'-2' for f in features_list] + ['r_t', 'r_t-1', 'r_t-2']
df_new.dropna(subset=new_features,
inplace=True)
# Here we assign initial clustering by r_t
df_new['RISK'] = np.log(df_new['r_t'])
# create action
if len(action_thresh) == 0:
df_new['ACTION'] = 0
pfeatures = len(df_new.columns)-5
else:
action_thresh = [-1e20] + action_thresh + [1e20]
actions = list(range(-no_action_id, len(action_thresh)-1-no_action_id)) # [0, 1] #[0, 5000, 100000]
df_new[features_list[0]+'_change'] = df_new[features_list[0]+'-1'] -\
df_new[features_list[0]+'-2']
df_new['ACTION'] = pd.cut(df_new[features_list[0]+'_change'], bins=action_thresh, right=False, labels=actions)
# set the no action to 0
pfeatures = len(df_new.columns)-6
# df_new = df_new[df_new['r_t'] != 0]
df_new = df_new.reset_index()
df_new = df_new.drop(columns=['index'])
# moving region col to the end, since not a feature
if target_colname not in features_list:
df_new = df_new.loc[:, [c for c in df_new if c not in [region_colname, target_colname]]
+ [region_colname] + [target_colname]]
pfeatures -= 1
else:
df_new = df_new[[c for c in df_new if c not in [region_colname]]
+ [region_colname]]
# Drop all rows with empty cells
# df_new.dropna(inplace=True)
return df_new, pfeatures, actions
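# Illustrative sketch of calling createSamples on a toy dataframe. The column names mirror the
# examples in the parameter comments above; the threshold values and days_avg here are assumptions
# chosen only for demonstration, not the project's real settings.
def _example_createSamples():
    toy = pd.DataFrame({'state': ['A'] * 30,
                        'date': pd.date_range('2020-03-01', periods=30, freq='D'),
                        'deaths': np.arange(1.0, 31.0),
                        'mobility': np.linspace(1.0, 0.5, 30)})
    df_samples, pfeatures, actions = createSamples(toy,
                                                   target_colname='deaths',
                                                   region_colname='state',
                                                   date_colname='date',
                                                   features_list=['mobility'],
                                                   action_thresh_base=([-0.1, 0.1], 1),
                                                   days_avg=3)
    # df_samples holds ID, TIME, lagged features, r_t ratios, RISK and ACTION per region/period
    return df_samples, pfeatures, actions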
# split_train_test_by_id() takes in a dataframe of all the data,
# returns Testing and Training dataset dataframes with the ratio of testing
# data defined by float test_ratio
def split_train_test_by_id(data, # dataframe: all the data
test_ratio, # float: portion of data for testing
id_column): # str: name of identifying ID column
def test_set_check(identifier, test_ratio):
return binascii.crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
return data.loc[~in_test_set], data.loc[in_test_set]
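# Quick sketch of the hash-based split on toy data: every ID is mapped through crc32, so a given
# ID always lands in the same set even when the dataframe grows later. The 'ID' column and the
# 20% ratio below are just for illustration.
def _example_split_train_test_by_id():
    toy = pd.DataFrame({'ID': np.arange(100), 'RISK': np.linspace(0, 1, 100)})
    train, test = split_train_test_by_id(toy, test_ratio=0.2, id_column='ID')
    # roughly 80 rows end up in train and 20 in test, deterministically per ID
    return train, test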
# (MDP functions)
# Fitting function used for the MDP fitting
# this function realizes the split according to the mode,
# computes the training and testing errors for each fold and the averaged cross-validation error (training and testing)
def fit_cv(df,
pfeatures,
splitting_threshold,
clustering,
actions,
clustering_distance_threshold,
classification,
n_iter,
n_clusters,
horizon=5,
OutputFlag=0,
cv=3,
n=-1,
random_state=1234,
n_jobs=1,
mode='ID',
plot=False,
save=False,
savepath=''):
df_training_error = pd.DataFrame(columns=['Clusters'])
df_testing_error = pd.DataFrame(columns=['Clusters'])
testing_errors = []
# shuffle ID's and create a new column 'ID_shuffle'
random.seed(random_state)
g = [df for _, df in df.groupby('ID')]
random.shuffle(g)
    df = pd.concat(g)
#! /usr/bin/env python3
""" -------------------------------
Copyright (C) 2018 RISE
This code was produced by RISE
The 2013-03-26 version
bonsai/src_v02/diagnose.py
processing the diagnosis data
Notice: This file is not imported
using the name dia, since dia is
often used for a dataframe with
diagnosis data content
------------------------------------"""
import pandas as pd
import numpy as np
import copy
import bonsai_io as bio
import common
import lexicon
import global_settings as gs
import merge
""" ----------------------------------------
generate version 1 dia
---------------------------------------"""
def special(ICD10):
if common.isnull(ICD10): # not needed after filled in
return 0
if 'G' in str(ICD10):
return 1
if 'H' in str(ICD10):
return 1
if 'K' in str(ICD10):
return 1
if 'L' in str(ICD10):
return 1
if 'O' in str(ICD10):
return 1
if 'Q' in str(ICD10):
return 1
if 'R' in str(ICD10):
return 1
if 'T' in str(ICD10):
return 1
if 'Z' in str(ICD10):
return 1
if str(ICD10) == 'D469':
return 0
if str(ICD10) == 'D761':
return 0
if str(ICD10) == 'D459':
return 0
if 'D' in str(ICD10):
return 1
return 0
def generate_dia():
"""
constructing the file stored as generated_dia,
see places.py
"""
xcols = ['ICD7', 'ICD9', 'text_C24_']
dia = bio.read_original_dia()
dia = dia.sort_values(by = xcols)
print('orig shape:', dia.shape)
# (1) select the unique rows
dia = dia.drop_duplicates()
print('non duplicate shape:', dia.shape)
# (2) select first diagnoses
dia = dia[dia['DiagnosNr_Diagnos'] == '1']
dia = dia.drop(['DiagnosNr_Diagnos'], axis=1)
print('first dia shape:',dia.shape)
# (3) fill in codes
dia = lexicon.fill_in_by_compiled_lex(dia)
dia = dia.drop(xcols, axis=1)
print('filled dia shape:',dia.shape)
# (4) remove the special cases and the not needed columns
dia['special'] = dia['ICD10'].apply(special)
dia = dia[dia['special'] == 0]
dia = dia.drop(['special'], axis=1)
print('no special shape:',dia.shape)
# (5) take care of numbers
if 'Diagnos_lder' in gs.places.diagnose_selection:
dia['Diagnos_lder'] = dia['Diagnos_lder'].apply(common.str2number)
return dia
def rm_dia_cols(dia):
cols = gs.places.diagnose_ohe
dia = common.rmcols(dia, cols)
return dia
""" ----------------------------------------
add dia groups
---------------------------------------"""
def look_up_group_by_codes(groups, ICD10, SNOMED):
g = gs.names.input_data_group
row = groups[(groups['ICD10'] == ICD10) & (groups['SNOMED'] == SNOMED)]
if not row.empty:
return row[g].values[0]
return str(0)
def look_up_group(df, groups):
g = look_up_group_by_codes(groups, df['ICD10'], df['SNOMED'])
return g
def add_group(dia, groups):
g = gs.names.dia_group
dia[g] = dia.apply(lambda d: look_up_group(d, groups), axis = 1)
return dia
def rm_group_col(dia):
g = gs.names.dia_group
dia = common.rmcol(dia, g)
return dia
""" ----------------------------------------
to one data frame df1 add columns
from another data frame df2
Note: in add_cols
    df1[yc] = '0' (as a string) when there is no entry x in df2
df1[yc] = NaN when df2[x] = NaN
---------------------------------------"""
def look_up_entry(entry, df, entry_col, value_col):
dfe = df[df[entry_col] == entry]
if not dfe.empty:
return dfe[value_col].values[0]
return str(0)
def add_cols(df1, df2, xc, ycs):
for yc in ycs:
df1[yc] = df1[xc].apply(lambda x:look_up_entry(x, df2, xc, yc))
return df1
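# Minimal illustration of add_cols on toy data (made-up column names): a LopNr missing from df2
# comes back as the string '0', while an existing entry whose value is NaN is copied as NaN.
def _example_add_cols():
    df1 = pd.DataFrame({'LopNr': ['1', '2', '3']})
    df2 = pd.DataFrame({'LopNr': ['1', '2'], 'Kon': ['M', np.nan]})
    return add_cols(df1, df2, 'LopNr', ['Kon'])  # Kon becomes ['M', nan, '0']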
def look_up_or_zero(entry, df, entry_col, value_col, verb = False):
dfe = df[df[entry_col] == entry]
if not dfe.empty:
val = dfe[value_col].values[0]
if verb:
print('val =', val, type(val))
if isinstance(val, str):
return val
return str(0)
def add_cols_or_zero(df1, df2, xc, ycs, verb = False):
df1_copy = copy.copy(df1)
for yc in ycs:
df1_copy[yc] = df1[xc].apply(lambda x:look_up_or_zero(x, df2, xc, yc, verb))
return df1_copy
"""
def add_cols_or_zero(df1, df2, xc, ycs, verb = False):
for yc in ycs:
df1[yc] = df1[xc].apply(lambda x:look_up_or_zero(x, df2, xc, yc, verb))
return df1
"""
def look_up_aho(df, x, col, zero = False):
return df[col][x] if x in df.index and (not zero or isinstance(df[col][x], str)) else str(0)
def add_cols_aho(df1, df2, xc, ycs, zero = False):
if zero:
for yc in ycs:
df1[yc] = df1[xc].apply(lambda x: df2[yc][x] if x in df2.index and isinstance(df2[yc][x], str) else str(0))
else:
for yc in ycs:
df1[yc] = df1[xc].apply(lambda x: df2[yc][x] if x in df2.index else str(0))
return df1
""" ----------------------------------------
add person data
---------------------------------------"""
def add_pers(dia):
cols = gs.places.person_cols
copy_cols = copy.copy(cols)
copy_cols.remove('LopNr')
pers = bio.readperson()
dia = add_cols(dia, pers, 'LopNr', copy_cols)
return dia
def add_pers_ohe(dia):
cols = gs.places.person_ohe
df = ones_x(dia, cols)
return df
def rm_pers_cols(dia):
cols = gs.places.person_ohe
dia = common.rmcols(dia, cols)
return dia
""" ----------------------------------------
add incare data (sluten vaard)
---------------------------------------"""
def add_incare(dia, nr = 0):
xcol = 'LopNr'
ycol = gs.places.incare_ohe[0] # only one column is used so far
inc = bio.readincare()
inc = inc.sort_values([xcol]).reset_index(drop=True)
if nr > 0:
inc = inc[0:nr] # an initial part of incare
L = list(dia[xcol].values)
inc = inc[ inc[xcol].isin(L) ] # part of inc with LopNr in dia
inc = name_compression(inc, xcol, ycol) # first letter set for each LopNr
dia = add_cols(dia, inc, xcol, [ycol]) # add compressed inc cols to dia
return dia
# the following functions are just for test since the incare compression
# lists are merged with the nicare and causes lists before unfolding ohe
def add_incare_ohe(dia):
ycol = gs.places.incare_ohe[0] # only one column is used so far
dia = one_general(dia, ycol) # mk first letter one hot
return dia
def rm_incare_cols(dia):
cols = gs.places.incare_ohe
dia = common.rmcols(dia, cols)
return dia
""" ----------------------------------------
add nicare data (oppen vaard)
---------------------------------------"""
def add_nicare(dia, nr = 0):
xcol = 'LopNr'
ycol = gs.places.nicare_ohe[0] # only one column is used so far
nic = bio.readnicare()
nic = nic.sort_values([xcol]).reset_index(drop=True)
if nr > 0:
nic = nic[0:nr] # an initial part of incare
L = list(dia[xcol].values)
nic = nic[ nic[xcol].isin(L) ] # part of nic with LopNr in dia
nic = name_compression(nic, xcol, ycol) # first letter set for each LopNr
dia = add_cols(dia, nic, xcol, [ycol]) # add compressed nic cols to dia
return dia
# the following functions are just for test since the nicare compression
# lists are merged with the nicare and causes lists before unfolding ohe
def add_nicare_ohe(dia):
ycol = gs.places.nicare_ohe[0] # only one column is used so far
dia = one_general(dia, ycol) # mk first letter one hot
return dia
def rm_nicare_cols(dia):
cols = gs.places.nicare_ohe
dia = common.rmcols(dia, cols)
return dia
""" ----------------------------------------
add drug data
---------------------------------------"""
def add_drug(dia):
cols = gs.places.drug_selection.copy()
cols.remove('LopNr')
drug = bio.readdrug()
dia = add_cols(dia, drug, 'LopNr', cols)
return dia
def add_drug_ohe(dia):
cols = gs.places.drug_ohe
df = ones_x(dia, cols)
return df
def rm_drug_cols(dia):
cols = gs.places.drug_ohe
dia = common.rmcols(dia, cols)
return dia
""" ----------------------------------------
add one hot encodings for names column
with unique names (LopNr) and a single
code in each row in the codes column
---------------------------------------"""
"""
def equal_str(a, b):
if not (isinstance(a, str) and isinstance(b, str)):
return 0
return int(a == b)
"""
def one(df, c):
for x in df[c].dropna().drop_duplicates():
df[x] = (df[c] == x).astype(int)
return df
"""
def one_x(df, c):
for x in df[c].drop_duplicates():
if isinstance(x, str):
df[c + '_' + x] = df[c].apply(lambda z: equal_str(z, x))
return df
"""
def one_x(df, c):
for x in df[c].dropna().drop_duplicates():
df[c + '_' + x] = (df[c] == x).astype(int)
return df
def ones_x(df, cs):
for c in cs:
df = one_x(df, c)
return df
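# Small sketch of the one-hot helpers on a toy column (the column name is just an example):
# one() uses the raw value as the new column name, while one_x() prefixes it with the source
# column, which avoids clashes when different columns share category labels.
def _example_one_hot():
    toy = pd.DataFrame({'Kon': ['M', 'K', None, 'M']})
    return one_x(toy, 'Kon')  # adds integer columns 'Kon_M' and 'Kon_K'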
def to_int(x):
if isinstance(x, str):
return int(x)
if isinstance(x, int):
return x
return -1
def nr_sort(xs):
"""
sort a list of numbers on str type
"""
if not common.isarray(xs):
return []
ixs = list(map(to_int, xs))
ixs.sort()
xs = list(map(str, ixs))
return xs
def one_sorted(df, c):
xs = list(df[c].drop_duplicates())
xs = nr_sort(xs)
for x in xs:
df[c + '_' + x] = (df[c] == x).astype(int)
return df
def add_one_hot_groups(dia):
grp = gs.names.dia_group
dia = one_sorted(dia, grp)
return dia
""" ----------------------------------------
add one hot encodings for names column with
non-unique names and possibly several space
separated codes in each row in the codes
column
---------------------------------------"""
def head(xs):
ys = []
for x in xs:
if isinstance(x, str):
ys += [x[0]]
return ys
def split_and_head(ys):
cs = []
for y in ys.values:
if not common.isnull(y):
cs = np.append(cs, y.split())
return np.unique(head(cs))
def name_compression(df, xcol, ycol):
data = []
xs = df[xcol].drop_duplicates()
for x in xs:
dx = df[ df[xcol] == x ]
ys = dx[ycol].drop_duplicates()
ys = split_and_head(ys)
data = data + [ [x] + [ys] ]
    ds = pd.DataFrame(data)
import pandas as pd
from typing import List
import sys
metabolites_path = sys.argv[1]
proteins_path = sys.argv[2]
pathways_path = sys.argv[3]
def make_node_set(df):
return df.reindex(columns=['id', 'name', 'category', 'description', 'synonyms', 'xrefs'])
def make_edge_set(df):
return df.reindex(columns=['subject_id', 'predicate', 'object_id'])
def build_pathways(path) -> pd.DataFrame:
"""
Builds the pathway node set
"""
df = pd.read_csv(path, dtype=str)
df = df.rename(columns={
'SMPDB ID' : 'id',
'Name' : 'name',
'Description' : 'description'
})
df['category'] = 'pathway'
df['id'] = df.apply(lambda row: "SMP:{}".format(row['id']), axis=1)
df = make_node_set(df)
return df
def build_metabolites(path) -> (pd.DataFrame, pd.DataFrame):
"""
Builds the metabolite node set and edge set for the chemical_to_pathway_association
predicate.
"""
def build(row):
options = [
('ChEBI ID', 'CHEBI'),
('KEGG ID', 'KEGG'),
('HMDB ID', 'HMDB'),
('DrugBank ID', 'DRUGBANK'),
('Metabolite ID', 'PW'),
]
for column, prefix in options:
if isinstance(row[column], str):
return '{}:{}'.format(prefix, row[column])
print(row)
raise Exception('Could not find a metabolite ID')
df = pd.read_csv(path, dtype=str)
df['id'] = df.apply(build, axis=1)
df = df.drop_duplicates('id')
df['SMPDB ID'] = df.apply(lambda row: "SMP:{}".format(row['SMPDB ID']), axis=1)
nodes = df
edges = df
nodes['category'] = 'metabolite'
nodes = nodes.rename(columns={
'Metabolite Name' : 'name',
'IUPAC' : 'synonyms',
})
edges['predicate'] = 'chemical_to_pathway_association'
edges = df.rename(columns={
'id' : 'subject_id',
'SMPDB ID' : 'object_id',
})
nodes = make_node_set(nodes)
edges = make_edge_set(edges)
return nodes, edges
def build_proteins(path) -> (pd.DataFrame, pd.DataFrame):
"""
Builds the protein node set and edge set for the chemical_to_pathway_association
predicate.
"""
def build(row):
options = [
('Uniprot ID', 'UNIPROT'),
('DrugBank ID', 'DRUGBANK'),
('HMDBP ID', 'HMDB'),
('GenBank ID', 'GENBANK'),
]
# xrefs = []
for column, prefix in options:
if isinstance(row[column], str):
return '{}:{}'.format(prefix, row[column])
# xrefs.append(f'{prefix}:{row[column]}')
# if xrefs == []:
# raise Exception('Cannot find ID for above row')
# else:
# row['id'] = xrefs[0]
# row['xrefs'] = ';'.join(xrefs[1:])
# return row
    df = pd.read_csv(path, dtype=str)
## Generate twitter Pre-Trained Word2Vec and trained Word2Vec
## Word2Vec
import os
os.chdir("C:/Users/dordo/Dropbox/Capstone Project")
import pandas as pd
import pickle
from gensim import corpora
from gensim.models import Word2Vec
import gensim.downloader as api
##---------------------------------------------------------------------------##
## Define function to get embeddings from memory
def get_wv(model, dicts):
""" Get word embeddings in memory"""
w2v_embed = {}
missing = []
for val in dicts.values():
try:
it = model.wv[val]
except:
missing.append(val)
it = None
w2v_embed[val] = it
return w2v_embed, missing
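## Tiny self-contained sketch of get_wv: a toy corpus, a toy Word2Vec model and a toy gensim
## dictionary (all made up here) just to show the {token: vector-or-None} plus missing-tokens output.
def _example_get_wv():
    toy_sentences = [['stock', 'market', 'rally'], ['market', 'crash', 'fear']]
    toy_model = Word2Vec(toy_sentences, size=10, min_count=1)
    toy_dict = corpora.Dictionary(toy_sentences)
    embeds, missing = get_wv(toy_model, toy_dict)
    return embeds, missing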
##---------------------------------------------------------------------------##
## Reading in pre processed data
with open('Data/Twitter/ProcessedTwitter.pkl', 'rb') as input:
txt_end = pickle.load(input)
## Create dictionary
dicts = corpora.Dictionary(txt_end)
len(dicts)
## Filter by appearance in documents
dicts.filter_extremes(no_below=40, no_above=0.5, keep_n=None, keep_tokens=None)
len(dicts)
##--------------------------------------------------------------------------##
## PreTrained Word2vec
path = "C:/Users/dordo/Documents/Daniel/LSE/Capstone/Modelo/GoogleNews-vectors-negative300.bin"
model = Word2Vec(txt_end, size = 300, min_count = 40)
model.intersect_word2vec_format(path,
lockf=1.0,
binary=True)
model.train(txt_end, total_examples=model.corpus_count, epochs=25)
embeds_1 = get_wv(model, dicts)
## How many words of our corpus appear in the pre-trained model?
##---------------------------------------------------------------------------##
## Self Trained Word2Vec
model_t = Word2Vec(txt_end, window=5, min_count=40, workers=4, size = 50)
model_t.train(txt_end, epochs=50, total_words = model_t.corpus_total_words,
total_examples = model_t.corpus_count)
embeds_2 = get_wv(model_t, dicts)
##---------------------------------------------------------------------------##
## Pre Trained GLOVE
model_g = api.load("glove-twitter-50")
embeds_3 = get_wv(model_g, dicts)
embeds_3df = pd.DataFrame(embeds_3[0])
## These are the embeddings that are actually available in GloVe
embeds_3df.T[~embeds_3df.T[1].isnull()]
##---------------------------------------------------------------------------##
## Saving
pretrained_embed = pd.DataFrame(embeds_1[0])
# -*- coding: utf-8 -*-
from argparse import RawTextHelpFormatter, ArgumentParser
import pandas as pd
import os
from datetime import timedelta, datetime
from amurlevel_model.config import DAYS_FORECAST, ALL_STATIONS, NUMBER_OF_INFERENCE_STATIONS, DATASETS_PATH
from amurlevel_model.dataloaders.asunp import get_asunp_hydro_stations
from amurlevel_model.dataloaders.meteo import read_history_meteo
from amurlevel_model.dataloaders.hydro import read_hydro_all
from amurlevel_model.processing.merge_hydro_meteo import merge_hydro_meteo
from amurlevel_model.processing.merge_hydro_weatherforecast import merge_hydro_weatherforecast
from amurlevel_model.processing.merge_hydro_asunp import merge_hydro_asunp
from amurlevel_model.processing.merge_meteo_asunp import merge_meteo_asunp
from amurlevel_model.features.amur_features import make_dataset
from amurlevel_model.model.prepare_data import prepare_data
from amurlevel_model.model.model import build_model
from amurlevel_model.utils.common import set_logger
class EmptyHistoricalMeteo(Exception):
pass
class EmptyHistoricalHydro(Exception):
pass
def parse_args():
parser = ArgumentParser(description='''
    Script for evaluating the quality of the trained water-level prediction model.
    Predictions are made for the 10 days following f_day.
    Results are saved to the file level_{f_day}.csv
    Example: python predict.py -f_day 2020-11-01 -w /data/weights-aij2020amurlevel-2017.h5 (predictions from 2020-11-01 to 2020-11-11 with the model trained on data up to 2018)
             python predict.py -f_day 2013-02-01 -w /data/weights-aij2020amurlevel-2012.h5 (predictions from 2013-02-01 to 2013-02-11 with the model trained on data up to 2013)
''', formatter_class=RawTextHelpFormatter)
parser.add_argument('-f_day', type=str, required=True,
help='Дата от которой считаем предсказания')
parser.add_argument('-w','--weights', dest='w',type=str, required=True,
help='Путь до файла с весами модели')
args = parser.parse_args()
return args
if __name__ == "__main__":
logger = set_logger()
args = parse_args()
model = build_model()
    f_day = pd.to_datetime(args.f_day)
# Core functions
#
# this file contains reusable core functions like filtering on university
# and adding year and month name info
# these are functions which are generally used in every product
# roadmap: I want to push all functions from loose function
# to functions combined in classgroups
from nlp_functions import remove_punctuation
from nlp_functions import get_abstract_if_any
from nlp_functions import comma_space_fix
#from static import PATH_START, PATH_START_PERSONAL
#from static import PATH_START_SERVER , PATH_START_PERSONAL_SERVER
#from static import UNPAYWALL_EMAIL
#from static import PATH_STATIC_RESPONSES
#from static import PATH_STATIC_RESPONSES_ALTMETRIC
#from static import PATH_STATIC_RESPONSES_SCOPUS_ABS
#from static import MAX_NUM_WORKERS # not used everywhere so care
import pandas as pd
import calendar
import numpy as np
import requests
from pybliometrics.scopus import ScopusSearch
from pybliometrics.scopus import AbstractRetrieval
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from functools import partial
### from functools import wraps
import time
from datetime import datetime # new
from datetime import timedelta
import re
import mysql.connector
from mysql.connector import Error
from altmetric import Altmetric
import pickle
import functools
from unittest.mock import Mock
from requests.models import Response
#import sys
from nlp_functions import faculty_finder
from pybliometrics.scopus import config
from pybliometrics.scopus.exception import Scopus429Error
import static
def overloaded_abstract_retrieval(identifier, view='FULL', refresh=True, id_type='eid'):
"""
The only thing this extra layer does is swap api-keys on error 429
Any multi-threading etc is done elsewhere (and may need its own testing as always)
"""
try:
res = AbstractRetrieval(identifier=identifier, view=view, refresh=refresh, id_type=id_type)
time.sleep(0.05)
except Scopus429Error:
# Use the last item of _keys, drop it and assign it as
# current API key
# update: keep swapping until it works
still_error = True
while still_error:
if len(static.SCOPUS_KEYS) > 0:
config["Authentication"]["APIKey"] = static.SCOPUS_KEYS.pop()
try:
time.sleep(1) # only when key has changed so 1s is fine
res = AbstractRetrieval(identifier=identifier, view=view, refresh=refresh, id_type=id_type)
still_error = False
except Scopus429Error: # NO! only for 429
print('error, key pop will happen at top of while top')
except:
print('non429 error')
still_error = False
res = None # ?
else:
still_error = False
res = None # ?
return res
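# Illustrative call (commented out because it needs valid Scopus credentials in the pybliometrics
# config, and the EID below is only a placeholder); behaves like AbstractRetrieval but swaps API
# keys on 429 quota errors:
# ab = overloaded_abstract_retrieval(identifier='2-s2.0-00000000000', view='FULL',
#                                    refresh=True, id_type='eid')
# print(None if ab is None else ab.title)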
def make_doi_list_from_csv(source_path, output_path, do_return=True):
# this function returns a list of DOIs from a source scopus frontend file
# in: source_path: a full path ending with .csv which contains a csv which has a column 'DOI'
# output_path: a full path ending with .csv which will be where the result is returned as csv
# out: a csv is generated and saved, and is returned as dataframe as well
#
df = pd.read_csv(source_path)
df[~df.DOI.isnull()].DOI.to_csv(output_path, header=False)
if do_return:
return df[~df.DOI.isnull()].DOI
else:
return None
def filter_on_uni(df_in, affiliation_column, cur_uni, affiliation_dict_basic):
"""" returns the dataframe filtered on the chosen university
in: df with column 'Scopus affiliation IDs' with list of affiliation ids in scopus style
cur_uni: a university name appearing in the dictionary affiliation_dict_basic
affiliation_dict_basic: a dictionary with keys unis and values affiliation ids
out: df filtered over rows
"""
# now the return has all info per university
# ! scival may change their delimiters here, so please check once a while if it works as intended
# put an extra check here to be safe
return df_in[df_in.apply(lambda x: not (set(x[affiliation_column].split('| '))
.isdisjoint(set(affiliation_dict_basic[cur_uni]))), axis=1)]
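# Toy illustration of filter_on_uni; the affiliation ids and the dictionary below are made-up
# stand-ins for the real affiliation_dict_basic used elsewhere in the project.
def _example_filter_on_uni():
    toy = pd.DataFrame({'Scopus affiliation IDs': ['60000001| 60000002', '60000003'],
                        'Title': ['paper A', 'paper B']})
    toy_dict = {'Some University': ['60000001']}
    return filter_on_uni(toy, 'Scopus affiliation IDs', 'Some University', toy_dict)  # keeps 'paper A' only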
def add_year_and_month_old(df_in, date_col):
"""" adds two columns to a dataframe: a year and a month
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start]YYYY[any 1 char]MM[anything][end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
df_in['year'] = df_in[date_col].apply(lambda x: x[0:4])
df_in['month'] = df_in[date_col].apply(lambda x: x[5:7])
df_in['month_since_2018'] = df_in.month.astype('int') + (df_in.year.astype('int')-2018)*12
df_in['month_name'] = df_in.month.astype('int').apply(lambda x: calendar.month_name[x])
return df_in
def add_year_and_month(df_in, date_col):
"""" adds two columns to a dataframe: a year and a month
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start]YYYY[any 1 char]MM[anything][end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
df_in['year'] = df_in[date_col].apply(lambda x: None if x is None else x[0:4])
df_in['month'] = df_in[date_col].apply(lambda x: None if x is None else x[5:7])
df_in['month_since_2018'] = df_in.apply(lambda x: None if x.month is None else int(x.month) + (int(x.year)-2018)*12, axis=1)
#df_in.month.astype('int') + (df_in.year.astype('int')-2018)*12
df_in['month_name'] = df_in.month.apply(lambda x: None if x is None else calendar.month_name[int(x)])
return df_in
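# Small sketch of the expected date format ('YYYY-MM-...') on a toy frame; 'coverDate' is used
# here only as an example column name.
def _example_add_year_and_month():
    toy = pd.DataFrame({'coverDate': ['2019-02-01', '2020-11-15', None]})
    return add_year_and_month(toy, 'coverDate')
    # first row gets year='2019', month='02', month_since_2018=14, month_name='February'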
def add_pure_year(df_in, date_col='Current publication status > Date'):
"""" adds one columns to a dataframe: a 'pure_year' based on pure info.
The input must fit the PURE form as 'Anything+YY'
We assume the year is after 2000! there are no checks for this
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start][anything]YYYY[end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
if date_col is None:
df_in['pure_year'] = np.nan
else:
df_in['pure_year'] = df_in[date_col].apply(lambda x: float('20' + x[-2:]))
return df_in
def get_scopus_abstract_info(paper_eid):
"""
    Retrieves the Scopus abstract object for a single paper and returns it together with diagnostics
    :param paper_eid: Scopus EID of the paper (may be None)
    :return: dict with the abstract object, an author-group warning flag, and error info
"""
# init
no_author_group = True # we want this too
error = False
ab = None
error_message = 'no error'
if paper_eid == None:
# paper_without eid
error_message = 'paper eid is none'
error = True
else:
try:
ab = overloaded_abstract_retrieval(identifier=paper_eid, view='FULL', refresh=True, id_type='eid')
except:
error = True
error_message = 'abstract api error'
if not(error):
# chk if API errors out on authorgroup call and log it
try:
ab.authorgroup
no_author_group = False
except:
no_author_group = True
##### this belongs in another function, with its own diagnostics + only run ff if this succeeds in topfn
####if not(no_author_group):
#### (bool_got_vu_author, a, b) = find_first_vu_author() # yet to make this
# also if no error, save the result for returns
return {'abstract_object': ab,
'no_author_group_warning': no_author_group,
'abstract_error': error,
'abstract_error_message': error_message}
def split_scopus_subquery_affils(subquery_affils, number_of_splits=4,
subquery_time = ''):
"""
! This function needs testing
This function takes in subquery_affils from make_affiliation_dicts_afids()
and translates it into a list of subqueries to avoid query length limits
in: subquery_affils from make_affiliation_dicts_afids()
number_of_splits: an integer between 2 and 10
subquery_time: an optional query to paste after every subquery
out: a list of subqueries to constrain scopussearch to a subset of affils
during stacking be sure to de-duplicate (recommended on EID)
"""
if (number_of_splits <= 10) & (number_of_splits > 1) & (number_of_splits % 1 == 0):
pass # valid number_of_splits
# you do not have to worry about number_of_splits < #afids because
# in python asking indices range outside indices range yields empty lists
# s.t. stacking them here does nothing
# needs checking though
else:
print('invalid number_of_splits, replacing with 4')
number_of_splits = 4
affil_count = len(subquery_affils.split('OR')) # number of affiliation ids
if affil_count <= 12: # to avoid weird situations
print('affil_count is small, returning single subquery')
my_query_set = subquery_affils + subquery_time
else:
# do it
my_query_set = []
step_size = int(np.floor(affil_count / number_of_splits)+1)
counter = 0
for cur_step in np.arange(0,number_of_splits):
if counter == 0:
cur_subquery = 'OR'.join(subquery_affils.split('OR')[0:step_size]) + ' ) '
elif counter == number_of_splits-1: # this is the last one
cur_subquery = ' ( ' + 'OR'.join(subquery_affils.split('OR')[step_size*cur_step:step_size*(cur_step+1)]) # + ' ) ) '
else:
cur_subquery = ' ( ' + 'OR'.join(subquery_affils.split('OR')[step_size*cur_step:step_size*(cur_step+1)]) + ' ) '
# stack results in a list, check if we need extra [] or not !
cur_subquery = cur_subquery + subquery_time
my_query_set.append(cur_subquery)
counter = counter + 1 # useless but OK
#print('-----')
#print(my_query_set)
#print('-----')
return my_query_set
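# Illustrative call with a made-up affiliation subquery standing in for the string that
# make_affiliation_dicts_afids() would produce, just to show the shape of the output.
def _example_split_subquery():
    toy_subquery = ' ( ' + ' OR '.join(['AF-ID(%d)' % i for i in range(60000000, 60000020)]) + ' ) '
    return split_scopus_subquery_affils(toy_subquery, number_of_splits=4,
                                        subquery_time=' AND PUBYEAR > 2017')  # list of 4 OR-blocks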
def get_first_chosen_affiliation_author(ab, chosen_affid):
"""
    :param ab: Scopus AbstractRetrieval object with an authorgroup
    :param chosen_affid: list of affiliation ids that count as the chosen university
    :return: dict with the first matching author's name, organization and an error flag
"""
# init
first_vu_author = None
cur_org = None
has_error = False
first_vu_author_position = None # care reverse!!! you need a length here or extra unreverse
try:
# loop over the authors in the author group, back to front, s.t. the 'first' vu author overwrites everything
# this is not ideal,
# because we would also want to check the second vu-author if first one can't be traced back to a faculty
for cntr, author in enumerate(ab.authorgroup[::-1]): # ensures the final vu_author result is the leading vu author
if author.affiliation_id == None:
# then we can't match as vu author (yet), so we just skip as we do non-vu authors
1
else:
if not (set(author.affiliation_id.split(', ')).isdisjoint(set(chosen_affid))):
cur_org = author.organization
if author.given_name == None:
author_given_name = '?'
else:
author_given_name = author.given_name
if author.surname == None:
author_surname = '?'
else:
author_surname = author.surname
first_vu_author = author_given_name + ' ' + author_surname
except:
has_error = True
return {'first_affil_author': first_vu_author,
'first_affil_author_org': cur_org,
'first_affil_author_has_error': has_error}
def get_count_of_chosen_affiliation_authors(ab, chosen_affid):
"""
    :param ab: Scopus AbstractRetrieval object with an authorgroup
    :param chosen_affid: list of affiliation ids that count as the chosen university
    :return: dict with the number of matching authors, a validity flag and an error flag
"""
# init
author_count_valid = False
author_count = 0
has_error = False
try:
# loop over the authors in the author group, back to front, s.t. the 'first' vu author overwrites everything
# this is not ideal,
# because we would also want to check the second vu-author if first one can't be traced back to a faculty
for cntr, author in enumerate(ab.authorgroup[::-1]): # ensures the final vu_author result is the leading vu author
if author.affiliation_id == None:
# then we can't match as vu author (yet), so we just skip as we do non-vu authors
1
else:
if not (set(author.affiliation_id.split(', ')).isdisjoint(set(chosen_affid))):
# then we have a vu-author. Count and continue
# notice there is no safety net if an author appears multiple times for some reason
author_count = author_count + 1
author_count_valid = True
except:
has_error = True
# then the author_count_valid remains False
return {'affil_author_count': author_count,
'affil_author_count_valid': author_count_valid,
'affil_author_count_has_error': has_error}
# upw start
## 1st at bottom
## 2nd
# remember, these are not for general purpose, but specific decorators for api-harvester-type functions crystal_()
def check_id_validity(func):
# first layer is a pass right now and that is OK
def decorator_check_id_validity(func):
@functools.wraps(func)
def wrapper_check_id_validity(cur_id, my_requests):
#
# pre-process
valid_doi_probably = False
if cur_id is not None:
if pd.notnull(cur_id):
if cur_id != 'nan':
try:
cur_id = cur_id.lower()
valid_doi_probably = True
except:
try:
cur_id = str(cur_id).lower() # not sure but OK
valid_doi_probably = True # stay on safe side then and loose tiny bit of performance
except:
# then give up
print('warning: failed to str(cur_doi).lower()')
if not valid_doi_probably:
# chance cur_id s.t. the crystal function can skip the checks and directly insert invalid-id-result
cur_id = 'invalid' # the only change
# end of pre-process
#
# run the core function
r, relevant_keys, cur_id_lower, prepend, id_type = func(cur_id, my_requests)
#
# no post-process
#
return r, relevant_keys, cur_id_lower, prepend, id_type
return wrapper_check_id_validity
return decorator_check_id_validity(func)
#############################################add_deal_info
## 3rd
def check_errors_and_parse_outputs(func):
# first layer is a pass right now and that is OK
def decorator_check_errors_and_parse_outputs(func):
@functools.wraps(func)
def wrapper_check_errors_and_parse_outputs(cur_id, my_requests=requests): # !!!!
#
# pre-processing
#
#
r, relevant_keys, cur_id_lower, prepend, id_type = func(cur_id, my_requests)
#
# post-processing
#
# init a dict and fill with right keys and zeros
dict_init = {} # values are filled with None as starting point
for key in relevant_keys:
dict_init[prepend + key] = None # really init empty and stays empty if error
dict_init[prepend + id_type] = None # can only be data['doi'] (!) # legacy
dict_init[prepend + id_type + '_lowercase'] = cur_id_lower
dict_init['own_' + id_type + '_lowercase'] = cur_id_lower
dict_init['orig_' + id_type] = cur_id # legacy
#
dict_to_add = dict_init
# ! somehow need to recognize doi_lowercase too...
#
try:
if 'error' in r.json().keys():
# the following code has been checked to work as intended
has_error = True
error_message = r.json()['message']
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
else:
# case: no error
#print(r)
#print(r.json())
has_error = False
error_message = 'no error'
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
# get data
try:
data = r.json()['results'][0]
except:
data = r.json()
# overwrite dict_to_add with data
for key in relevant_keys:
try:
dict_to_add[prepend + key] = data[key] # even upw_doi goes automatically : )
except KeyError:
dict_to_add[prepend + key] = None # if the key is not there, the result is None
dict_to_add[prepend + id_type] = cur_id # fix
except:
has_error = True
error_message = "error in r.json() or deeper"
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
return pd.Series(dict_to_add) # r, relevant_keys # different output # output has been changed
return wrapper_check_errors_and_parse_outputs
return decorator_check_errors_and_parse_outputs(func)
#############################################
## 4th
def faster(func):
# makes stuff for lists of ids and enables multi-threading and persistent sessions : ) amazing
# first layer is a pass right now and that is OK
def decorator_iterate_list(func):
@functools.wraps(func)
def wrapper_iterate_list(doi_list, silent=True, multi_thread=True, my_requests=None, allow_session_creation=True):
""" returns unpaywall info for a given doi list, includes result success/failure and diagnostics
:param doi_list: doi list as a list of strings, re-computes if doi are duplicate
does not de-dupe or dropna for generality, but you can do doi_list = df_in.doi.dropna().unique()
if you so desire
silent: whether you want silent behaviour or not, defaults to printing nothing
multi_thread: whether you want to multi_thread unpaywall (code has been tested), on by default
you do not have to worry about worker counts, a default law is integrated for that
my_requests: by default None, but can be exchanged for a requests-session on demand
with default, called functions will themselves enter 'requests' to reduce communication costs
allow_session_creation: if my_requests=None, this allows the fn to make its own session
:return: subset of unpaywall columns info + diagnostics as a pandas DataFrame, vertically doi's in lowercase-form.
duplicate doi's in the list are ignored, and the output has 1 row per unique DOI
Notice: this should be the only function to call fn_get_upw_info for more than 1 DOI (for developers)
, s.t. the multi-threading code can be here without duplicate code
"""
# all processing
# empty dataframe
df_unpaywall = pd.DataFrame()
if multi_thread: # valid across session used or not
max_num_workers = static.MAX_NUM_WORKERS
num_workers = np.max(
[1, int(np.floor(np.min([max_num_workers, np.floor(float(len(doi_list)) / 4.0)])))])
if (my_requests is None) & (allow_session_creation is True) & (len(doi_list) >= 20):
# then optionally make your own session # + avoid overhead for small jobs
# perform with a session
with requests.Session() as sessionA:
if multi_thread:
fn_get_upw_info_partial = partial(func,
my_requests=sessionA) # avoid communication costs
multi_result = multithreading(fn_get_upw_info_partial,
doi_list,
num_workers)
for cur_series in multi_result:
df_unpaywall = df_unpaywall.append(cur_series, ignore_index=True)
else: # single thread
for (counter, cur_doi) in enumerate(doi_list):
if silent == False:
print(
'unpaywall busy with number ' + str(counter + 1) + ' out of ' + str(len(doi_list)))
cur_res = func(cur_doi, my_requests=sessionA)
df_unpaywall = df_unpaywall.append(cur_res, ignore_index=True)
else:
# perform without a session
if multi_thread:
fn_get_upw_info_partial = partial(func,
my_requests=my_requests) # avoid communication costs
multi_result = multithreading(fn_get_upw_info_partial,
doi_list,
num_workers)
for cur_series in multi_result:
df_unpaywall = df_unpaywall.append(cur_series, ignore_index=True)
else: # single thread
for (counter, cur_doi) in enumerate(doi_list):
if silent == False:
print('unpaywall busy with number ' + str(counter + 1) + ' out of ' + str(len(doi_list)))
cur_res = func(cur_doi, my_requests=my_requests)
df_unpaywall = df_unpaywall.append(cur_res, ignore_index=True)
# either way, return the result
return df_unpaywall
return wrapper_iterate_list
return decorator_iterate_list(func)
## 5th
def appender(func, cur_id_name='doi'):
"""
Returns the given dataframe with extra columns with unpaywall info and result success/failure and diagnostics
Merging is done with lower-cased DOI's to avoid duplicate issues. The DOI name is case-insensitive
:param df_in: df_in as a pandas dataframe, must have a column named 'doi' with doi's as string
:return: pandas dataframe with extra columns with subset of unpaywall info and result success/failure and diagnostic
all new doi info is lowercase
"""
def decorator_appender(func):
@functools.wraps(func)
def wrapper_appender(df_in, silent=True, cut_dupes=False, avoid_double_work=True,
multi_thread=True, my_requests=None, allow_session_creation=True):
if cur_id_name == 'eid':
print('warning: scopus abstract accelerator has not been validated yet !')
# make doi_list
if avoid_double_work:
doi_list = df_in.drop_duplicates(cur_id_name)[cur_id_name].to_list() # notice no dropna to keep functionality the same
# also no lower-dropna for simplicity
else:
doi_list = df_in[cur_id_name].to_list()
if cut_dupes:
print('deprecated code running')
# I think it should yield exactly the same result, but needs testing that is all
# overwrites
doi_list = df_in[cur_id_name].dropna().unique()
# get unpaywall info
df_unpaywall = func(doi_list, silent, multi_thread, my_requests, allow_session_creation)
# merge to add columns
# prepare doi_lower
df_in.loc[:, 'id_lowercase'] = df_in[cur_id_name].str.lower()
df_merged = df_in.merge(df_unpaywall.drop_duplicates('own_' + cur_id_name + '_lowercase'),
left_on='id_lowercase', right_on='own_' + cur_id_name + '_lowercase', how='left')
# drop duplicates in df_unpaywall to avoid having duplicates in the result due repeating DOI's or Nones
# assumption: all none returns are the exact same
if not silent:
print('done with add_unpaywall_columns')
return df_merged
return wrapper_appender
return decorator_appender(func)
@appender
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_unpaywall(cur_id, my_requests):
# always use cur_id, my_requests for in and r, relevant_keys for out
# id is either cur_doi or 'invalid' if invalid
prepend = 'upw_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
relevant_keys = ['free_fulltext_url',
'is_boai_license', 'is_free_to_read', 'is_subscription_journal',
'license', 'oa_color'] # , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + static.UNPAYWALL_EMAIL) # force string
# keep multi_thread to 16 to avoid issues with local computer and in rare occasions the api returns
# this try making the code 10x slower
"""
try:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
except:
print('request failed hard for unpaywall, filling blank')
in_file = open(PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
"""
return r, relevant_keys, cur_id_lower, prepend, id_type
add_unpaywall_columns = crystal_unpaywall # the final function goes through the new pipe
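# The decorator stack applies bottom-up: check_id_validity normalizes/flags bad DOIs,
# check_errors_and_parse_outputs turns each response into a pandas Series of upw_ columns plus
# error info, faster handles DOI lists with optional sessions and multithreading, and appender
# merges everything back onto a dataframe on lowercased DOI. A usage sketch (commented out
# because it makes live Unpaywall API calls; the DOI below is a placeholder):
# df_pubs = pd.DataFrame({'doi': ['10.1000/xyz123', None]})
# df_enriched = add_unpaywall_columns(df_pubs, silent=True)
# df_enriched.filter(like='upw_').columns  # upw_free_fulltext_url, upw_license, upw_error, ...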
# recreate the legacy unpaywall functions for now
#
def legacy_crystal_unpaywall(cur_id, my_requests):
# always use cur_id, my_requests for in and r, relevant_keys for out
# id is either cur_doi or 'invalid' if invalid
prepend = 'upw_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
relevant_keys = ['free_fulltext_url',
'is_boai_license', 'is_free_to_read', 'is_subscription_journal',
'license', 'oa_color'] # , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + static.UNPAYWALL_EMAIL) # force string
# keep multi_thread to 16 to avoid issues with local computer and in rare occasions the api returns
# this try making the code 10x slower
"""
try:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
except:
print('request failed hard for unpaywall, filling blank')
in_file = open(PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
"""
return r, relevant_keys, cur_id_lower, prepend, id_type
fn_get_upw_info = check_errors_and_parse_outputs(check_id_validity(legacy_crystal_unpaywall)) # avoid, legacy
fn_get_all_upw_info = faster(fn_get_upw_info) # these are only for legacy and should be avoided
###add_unpaywall_columns = appender(fn_get_all_upw_info) # the final function goes through the new pipe
#
# I do not like this kind of handling as it breaks some functools functionality
# I will refactor legacy code later some time
@appender
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_altmetric(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
"""
prepend = 'altmetric_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
# some settings
api_ver = 'v1' # may change in future, so here it is. For api-key re-edit with altmetric package
api_url = "http://api.altmetric.com/%s/" % api_ver
url = api_url + 'doi' + "/" + cur_id
relevant_keys = ['title', 'cited_by_policies_count', 'score'] # OK for now, care some may miss, patch for that !
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_ALTMETRIC, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
r = my_requests.get(url, params={}, headers={})
return r, relevant_keys, cur_id_lower, prepend, id_type
add_altmetric_columns = crystal_altmetric
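# Same calling pattern as add_unpaywall_columns but against the Altmetric API (commented out
# because it makes live calls; the DOI below is a placeholder):
# df_alt = add_altmetric_columns(pd.DataFrame({'doi': ['10.1000/xyz123']}))
# df_alt[['altmetric_score', 'altmetric_cited_by_policies_count', 'altmetric_error']]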
###@appender(cur_id_name='eid')
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_scopus_abstract(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
"""
prepend = 'scopus_abstract_'
id_type = 'eid'
cur_id_lower = cur_id.lower() # irrelevant but OK
### not used
###if my_requests is None:
#### my_requests = requests # avoids passing requests around everytime
# some settings
# None
# the issue is that ab is not a requests-type
# but we need requests-type
# also, I do not want to use homebrew request code for it because scopus apis are an outsourced mess
# instead we will use a mock
relevant_keys = ['obje', 'retries'] # all in one, care integration
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
# r = my_requests.get(url, params={}, headers={})
#
# scopus api is not friendly so I need a try/except here
#
# wait-and-retry
one_shot = False
if one_shot:
retries = 0
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
r = Mock(spec=Response)
r.json.return_value = {'obje': pickle.dumps(ab), 'message': 'hi', 'retries':retries}
r.status_code = 999
# requirements:
# r.json().keys
# r.json()['message']
# r.json()['results'] # if not present, will not unpack and use json().keys()
except:
# if so, fall back to invalid routine
#
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# print(one_shot)
retry = True
retries = -1
while retry:
#retry = False # removes retries
retries = retries + 1
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
qq = ab.title
qqx = qq + 'x'
#
                    # if the api does not error and we have a title, then the call is correct and we got info back successfully
#
# then do rest of actions
r = Mock(spec=Response)
r.json.return_value = {'obje': pickle.dumps(ab), 'message': 'hi', 'retries': retries}
r.status_code = 999
retry = False
except:
# we had an api error or a return with empty information
# either way, just fillna and continue
if retries < 30:
retry = True
time.sleep(1)
if retries > 2:
print('retrying ' + str(retries))
### some returns are caught here as well sadly...
else:
retry = False
# prepare for exit
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
# you have to validate this code because scopus has weird features going in which mess up data when overloading
return r, relevant_keys, cur_id_lower, prepend, id_type
crystal_scopus_abstract = appender(func=crystal_scopus_abstract, cur_id_name='eid')
###@appender(cur_id_name='eid')
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_scopus_abstract2(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
2 only gives abstract_text
"""
prepend = 'scopus_abstract_'
id_type = 'eid'
cur_id_lower = cur_id.lower() # irrelevant but OK
### not used
###if my_requests is None:
#### my_requests = requests # avoids passing requests around everytime
# some settings
# None
# the issue is that ab is not a requests-type
# but we need requests-type
# also, I do not want to use homebrew request code for it because scopus apis are an outsourced mess
# instead we will use a mock
relevant_keys = ['text', 'retries'] # all in one, care integration
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
# r = my_requests.get(url, params={}, headers={})
#
# scopus api is not friendly so I need a try/except here
#
# wait-and-retry
one_shot = False
if one_shot:
retries = 0
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
r = Mock(spec=Response)
try:
ab_abstract = ab.abstract
except:
# error in getting abstract out (outside API
ab_abstract = np.nan
r.json.return_value = {'text': ab_abstract, 'message': 'hi', 'retries':retries}
r.status_code = 999
# requirements:
# r.json().keys
# r.json()['message']
# r.json()['results'] # if not present, will not unpack and use json().keys()
except:
# if so, fall back to invalid routine
#
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# print(one_shot)
retry = True
retries = -1
while retry:
#retry = False # removes retries
retries = retries + 1
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
qq = ab.title
qqx = qq + 'x'
#
                    # if the api does not error and we have a title, then the call is correct and we got info back successfully
#
# then do rest of actions
r = Mock(spec=Response)
try:
ab_abstract = ab.abstract
except:
# error in getting abstract out (outside API
ab_abstract = np.nan
r.json.return_value = {'text': ab_abstract, 'message': 'hi', 'retries': retries}
r.status_code = 999
retry = False
except:
# we had an api error or a return with empty information
# either way, just fillna and continue
if retries < 30:
retry = True
time.sleep(1)
if retries > 2:
print('retrying ' + str(retries))
else:
retry = False
# prepare for exit
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
# you have to validate this code because scopus has weird features going in which mess up data when overloading
return r, relevant_keys, cur_id_lower, prepend, id_type
crystal_scopus_abstract2 = appender(func=crystal_scopus_abstract2, cur_id_name='eid')
class api_extractor:
"""
DEPRECATED: please stop using this... I will make a new one later, for now updates and patches are stopped
This class is an api extractor: it extracts info across api's.
Has multi-threading :)
Is not an eager operator so ScopusSearch query is only executed when needed and not on initialization
source_list: which sources to use, like unpaywall
query: query to put in scopussearch
Under construction: only does unpaywall data right now to test multi-threading
Also, I need an extra step for scopussearch datacleaning split-off
    Double-check that you have the right functions (e.g. the corresponding-author functions), for instance for the unpaywall drop_dupe step in the merge
    Plan for now: add a ScopusSearch bypass and use it to test the speed gain on larger volumes
"""
def __init__(self,
query='TITLE(DATA SCIENCE) AND PUBDATETXT(February 2018)',
source_list=['all'],
max_num_workers=32):
self.source_list = source_list
self.query = query
self.scopus_search_info = None
self.scopus_search_info_ready = False
self.max_num_workers = max_num_workers
def get_scopus_search_info(self, cur_query):
"""
Gets the scopus search info and return it as dataframe of obj.results
Not yet handling errors of API...
"""
use_sleep_and_retry = True
if use_sleep_and_retry:
no_res = True
cntr=0
while no_res:
try:
res = pd.DataFrame(ScopusSearch(cur_query, refresh=True).results)
no_res = False
except:
cntr = cntr + 1
print(str(cntr) + ' ' + cur_query)
time.sleep(1)
else:
res = pd.DataFrame(ScopusSearch(cur_query, refresh=True).results)
return res
def feed_scopus_search_info(self, df_in, do_return=False, do_overwrite=False):
"""
This methods allows you to directly feed in a dataframe with scopussearch info,
of the form pandas.DataFrame(ScopusSearch().results)
"""
if (self.scopus_search_info_ready is False) | do_overwrite is True:
self.scopus_search_info = df_in
self.scopus_search_info_ready = True
if do_return:
return self.scopus_search_info
else:
print('scopus search info not updated because info was already entered and do_overwrite was provided False')
def extract(self, use_multi_thread=True, skip_scopus_search=False, skip_unpaywall=False,
use_parallel_apis=False):
"""
extract all chosen info
"""
# the functions like get_scopus_search_info and fn_get_upw_info,
# should always be single-thread in themselves,
# and we make them multi-thread outside of their own functions
#
# !!! we can further speed up by requesting from api providers in parallel
# that way we can further avoid api rate limits
# for this we need advanced functionality
# after writing the code, turn the default use_parallel_apis to True
#
#
# always redo scopus-search unless explicitly asked skip_scopus_search
# init
if not(self.scopus_search_info is None):
df_temp = self.scopus_search_info.copy()
doi_list = df_temp[~df_temp.DOI.isnull()].DOI.drop_duplicates().to_list()
#
# doi list issue happens here and in getupwdata line 161: search to_list, and doi/DOI difference
# here: add fn (read jupyter)
df_upw = pd.DataFrame()
df_ss = pd.DataFrame()
if use_multi_thread:
#ss
if skip_scopus_search is False:
# !!! please thoroughly test this
print('untested functionality called: multithread scopus search: careful!') # see fast_scopus_search_test.py for dev!
my_query = self.query # use own query
mini_queries = split_query_to_months(my_query)
count_queries = len(mini_queries)
# num_workers law: PLEASE TEST IT for optimum point or not
num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers, np.floor(float(count_queries)/4.0)])))])
#
multi_result = multithreading(self.get_scopus_search_info, mini_queries, num_workers)
for cur_series in multi_result:
# we are appending dataframes, not series
df_ss = df_ss.append(cur_series, ignore_index=True)
###doi_list = df_ss.doi # check this !
## This is the point where parallel-api functionality should start(!)
if use_parallel_apis:
1
# please first make the apis work in single_thread
# then in regular multi-thread
# and finally in parallel_apis_multi_thread.
# 1. set sources using the skip_ arguments
# 2. choose max_workers using not on #dois but #dois*doi-apis + #eids*eid-apis
# 3. make a list with 1 element per job, including all details like
# [ [doi_1,'unpaywall'], [doi_1,'unpaywall'], [eid_1,'scival']. ...]
# 4. push that into multi-threading, but use a different function
# use the function I started below named get_parallel_api_info()
# this function picks up the source in element2 in a list element and
# directs to the right api function
# this makes the code superclean to support all forms of threading
# while keeping full functionality
# also, it needs to add a column with 'source' for differentiation
# 5. follow the unpaywall code below and append and done
# 6. for proper testing, split by source column back into df_upw/etc/etc
# and give the serial_api routine also a combined df for comparability
# 7. do extensive testing
# 8. do timing: how large is the speed gain quantitatively?
# this is probably best to test on high-end of very-high-end machines
# because we need to hit the api rate limits with serial_apis to see an effect
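                    # Minimal sketch (commented out, untested) of how the job list and fan-out described
                    # above could look; it assumes the multithreading_starmap helper defined below unpacks
                    # (id, source) tuples, and the 'scival' branch is still a stub:
                    #
                    # jobs = ([(doi, 'unpaywall') for doi in doi_list]
                    #         + [(eid, 'scival') for eid in df_ss.eid.to_list()])
                    # num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers,
                    #                                               np.floor(float(len(jobs)) / 4.0)])))])
                    # multi_result = multithreading_starmap(self.get_parallel_api_info, jobs, num_workers)
                    # df_api = pd.DataFrame()
                    # for cur_series in multi_result:
                    #     df_api = df_api.append(cur_series, ignore_index=True)
                    # df_upw = df_api[df_api.api_source == 'unpaywall']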
else:
#upw
if skip_unpaywall is False:
num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers, np.floor(float(len(doi_list))/4.0)])))])
multi_result = multithreading(fn_get_upw_info, doi_list, num_workers)
for cur_series in multi_result:
df_upw = df_upw.append(cur_series, ignore_index=True)
#if ~skip_scival:
# 1
else:
# single-thread
# ss
if skip_scopus_search is False:
# query fed separately btw
# 2 lines for clarity for now
scopus_search_results = self.get_scopus_search_info(self.query) # care
self.feed_scopus_search_info(scopus_search_results) # store in properties
df_ss = scopus_search_results # combining results is trivial for single-thread
###doi_list = df_ss.doi # check this !
# upw
if skip_unpaywall is False:
for cur_doi in doi_list:
series_to_add = fn_get_upw_info(cur_doi)
df_upw = df_upw.append(series_to_add, ignore_index=True)
# scopussearch: the save and .self are issue for multithread, incl
# overwrite of results properties
# you need to fix that
# also, the num_workers law: you need to decide that differently too
# you prolly have 1 - 120 months, and 1 workers does 1 month a time
# so you need like #months/3 or a comparable version of the law below
return df_upw, df_ss # ! merge or combine or store properly later
def get_parallel_api_info(self, cur_id, source):
# please check if the multi-threader unpacks list elements, if so use 1 argument
# and unpack within the function to id/source
# to distinguish later, add the source as a column (is per DOI/EID)
        source_dict = {'api_source': source}
        if source == 'unpaywall':
            series_to_add = fn_get_upw_info(cur_id)  # cur_id is a doi here
        elif source == 'scival':
            # scival lookup is not implemented yet; return an empty series so the append below still works
            series_to_add = pd.Series(dtype=object)
        else:
            series_to_add = pd.Series(dtype=object)
        series_to_add = series_to_add.append(pd.Series(source_dict))
        return series_to_add
def change_max_num_workers(self, max_num_workers):
self.max_num_workers = max_num_workers
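# Minimal usage sketch of the api_extractor class above (not called anywhere); the DOI and the
# worker count are made-up values, and ScopusSearch is skipped so no Scopus API key is needed.
def _example_api_extractor_usage():
    ae = api_extractor(max_num_workers=4)
    ae.feed_scopus_search_info(df_in=pd.DataFrame({'DOI': ['10.1000/xyz123']}), do_overwrite=True)
    df_upw, df_ss = ae.extract(use_multi_thread=False, skip_scopus_search=True, skip_unpaywall=False)
    return df_upw, df_ss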
def split_query_to_months(query, silent=False):
"""
warning: did not pass testing, some data records may not be retrieved
This function splits a ScopusSearch query into multiple ones
    It takes a query with a year indication and splits it into 1 query per month
This in turn allows the multi-threading functions of this import framework
to reduce the computation time
Otherwise, you will wait a very long serverside wait time and then get a
lot of data at once with massive download times and possibly more failures
input: a valid ScopusSearch query string which ends with exactly:
PUBYEAR > XXXX AND PUBYEAR < YYYY
with no other appearance of PUBYEAR text
and there is at least one valid year
Also, there should not be any month specification, only complete years
And incomplete years are not allowed (current year at time of call)
Also, the pubyear clauses should be extra clauses with ands at top level
please respect this format as the regex functionality is not perfect
advanced: the month january is also split up, because it generally is twice as large
as the other months
"""
# this code can be improved with regex
# extract years
final_year = str(int(query.split('PUBYEAR < ')[1]) - 1)
first_year = str(int(query.split('PUBYEAR > ')[1][0:4]) + 1)
rest_of_query = query.split('PUBYEAR > ')[0] # probably ending with ' AND'
# make year list
years = np.arange(int(first_year), int(final_year)+1)
# define month abbreviations (can split out later)
#calendar.month_name[ value between 1 and 12]
# example: PUBDATETXT(February 2018)
query_parts = []
for year in years:
for month_number in np.arange(1,12+1):
if month_number == 1:
# january is split again in two by open access y/n
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')'
+ ' AND OPENACCESS(1)')
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')'
+ ' AND OPENACCESS(0)')
else:
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')')
# careful with using ints and strs together
    if not silent:  # note: `~silent` would be truthy for both True and False
        print('query has been split up in ' + str(len(query_parts)) + ' queries for multi-threading')
return query_parts
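# Small illustration of split_query_to_months(); the query string is a made-up example that follows
# the documented 'PUBYEAR > XXXX AND PUBYEAR < YYYY' format.
def _example_split_query_to_months():
    parts = split_query_to_months('TITLE(data) AND PUBYEAR > 2017 AND PUBYEAR < 2020', silent=True)
    # 2 full years (2018, 2019) * (11 regular months + 2 January open-access variants) = 26 sub-queries,
    # e.g. 'TITLE(data) AND PUBDATETXT(February 2018)'
    return parts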
def multithreading(func, args,
workers):
with ThreadPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
def multithreading_starmap(func, args,
                           workers):
    # concurrent.futures executors have no starmap(); unpack the argument tuples manually
    with ThreadPoolExecutor(workers) as ex:
        res = ex.map(lambda p: func(*p), args)
    return list(res)
def multiprocessing(func, args,
workers):
with ProcessPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
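# Tiny sketch of the thread-pool helpers above; _square stands in for the real worker
# functions (fn_get_upw_info, get_scopus_search_info, ...).
def _example_multithreading_usage():
    def _square(x):
        return x * x
    return multithreading(_square, [1, 2, 3, 4], 2)  # -> [1, 4, 9, 16]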
def my_timestamp():
    # return a string with the current time info
now = datetime.now()
return '_'.join(['', str(now.year), str(now.month), str(now.day), str(now.hour), str(now.minute), str(now.second)])
def add_deal_info(path_deals, path_isn, df_b):
"""
This function adds columns with deal information to your dataframe
:param path_deals: path to csv with deals, must have columns: 'ISN':'deal_ISN',
'Titel':'deal_journal_title',
'Deal naam':'deal_name',
'Deal korting':'deal_discount',
'Deal type':'deal_owner',
'Deal bijgewerkt':'deal_modified',
'ISSN':'deal_ISSN'
:param path_isn: path to csv with table from isn to issn numbers, must have columns ISN and ISSN as translation,
    :param df_b: dataframe with at least the columns: issn, eIssn, upw_oa_color
The parameters should not have any columns matching the names of columns the function is trying to add
:return: your input dataframe df_b with extra columns
"""
# load in data from apc deals and isn-issn translation table
# apc deals
df_d_base = pd.read_csv(path_deals)
# isn issn translation table
df_t = pd.read_csv(path_isn)
# cleaning
df_b.at[df_b[df_b.issn.apply(lambda x: True if isinstance(x, list) else False)].index.tolist(), 'issn'] = None
# now translate isn<>issn
df_d = df_d_base.merge(df_t, left_on='ISN', right_on='ISN', how='left')
# rename columns for readability
df_d = df_d.rename(columns={'ISN': 'deal_ISN',
'Titel': 'deal_journal_title',
'Deal naam': 'deal_name',
'Deal korting': 'deal_discount',
'Deal type': 'deal_owner',
'Deal bijgewerkt': 'deal_modified',
'ISSN': 'deal_ISSN'})
# remove punctuation in ISSN
df_d['deal_ISSN_short'] = df_d.deal_ISSN.apply(lambda x: np.nan if x is np.nan else x[0:4] + x[5::])
# drop deals without ISSN to avoid bad merges (can upgrade later to match on j-names)
df_d = df_d[~df_d.deal_ISSN.isnull()]
# merge on both issn and eIssn (extensive exploration show this is safe, see file apcdeals1.ipnyb)
#
# complex merge-strategy here with dropping columns
df_m = df_b.merge(df_d, left_on='issn', right_on='deal_ISSN_short', how='left')
df_m = df_m.reset_index().rename(columns={'index': 'my_index'})
cols_d = list(df_d)
df_m_part_1 = df_m[~df_m.deal_ISSN.isnull()]
df_m_part_2 = df_m[df_m.deal_ISSN.isnull()].drop(cols_d, axis=1).merge(df_d, left_on='eIssn',
right_on='deal_ISSN_short', how='left')
df_m = df_m_part_1.append(df_m_part_2)
df_m = df_m.sort_values('my_index').reset_index().drop(['index', 'my_index'], axis=1)
#
# give nans some intuition
df_m['deal_discount_verbose'] = df_m['deal_discount'].apply(lambda x: 'no known deal' if x is np.nan else x)
# df_m['upw_oa_color_verbose'] = df_m['upw_oa_color'].apply(lambda x: 'unknown' if x is np.nan else x) # wrongplace
df_m['deal_owner_verbose'] = df_m['deal_owner'].apply(lambda x: 'no known deal' if x is np.nan else x)
return df_m
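# Stand-alone sketch (made-up data) of the two-pass merge used in add_deal_info(): match on the
# print ISSN first, then retry only the unmatched rows on the electronic ISSN.
def _example_two_pass_issn_merge():
    df_b = pd.DataFrame({'issn': ['1111111X', None], 'eIssn': [None, '2222222X']})
    df_d = pd.DataFrame({'deal_ISSN_short': ['1111111X', '2222222X'], 'deal_name': ['deal A', 'deal B']})
    df_m = df_b.merge(df_d, left_on='issn', right_on='deal_ISSN_short', how='left')
    part_1 = df_m[~df_m.deal_name.isnull()]
    part_2 = (df_m[df_m.deal_name.isnull()]
              .drop(list(df_d), axis=1)
              .merge(df_d, left_on='eIssn', right_on='deal_ISSN_short', how='left'))
    return part_1.append(part_2)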
def pre_process_pure_data(df,
org_info,
path_to_save=None,
test_mode_upw=False,
do_save=False,
silent=False):
"""
    Documentation is brief because timelines were tight.
This goes in:
df = dataframe from pure, conditions are tight (will follow)
org_info is an excel with 2 columns, 1 'Naam' and 1 'Faculteit' which map groups to faculties
path_to_save: path to where to save as string
test_mode_upw: whether you want to do unpaywall load for first few records or all of them
do_save: whether you want to save or not
This comes out:
    the cleaned, preprocessed dataframe with unpaywall info
"""
# clean column numbering first
    df.columns = [re.sub(r'^\d+.', "", x) for x in
                  df.columns]  # remove 1 or more digits (plus separator) at the start of the string
    df.columns = [re.sub(r'^\d+', "", x) for x in df.columns]
    df.columns = [re.sub(r'^ ', "", x) for x in df.columns]
    df.columns = [re.sub(r'^.\d+', "", x) for x in df.columns]
    df.columns = [re.sub(r'^ ', "", x) for x in df.columns]
# hidden settings
#
df = df[[
'Title of the contribution in original language',
'Current publication status > Date',
#'5.1 Publication statuses and dates > E-pub ahead of print[1]',
'Subtitle of the contribution in original language', # new
'Type',
'Workflow > Step',
'Original language',
'Electronic version(s) of this work > DOI (Digital Object Identifier)[1]',
'Organisations > Organisational unit[1]',
'Organisations > Organisational unit[2]',
'Organisations > Organisational unit[3]',
'Organisations > Organisational unit[4]',
'Organisations > Organisational unit[5]',
'Organisations > Organisational unit[6]',
'Organisations > Organisational unit[7]',
'Organisations > Organisational unit[8]',
'Organisations > Organisational unit[9]',
'Organisations > Organisational unit[10]',
'Journal > Journal[1]:Titles',
'Journal > Journal[1]:ISSNs',
# '14.3 Journal > Journal[1]:Additional searchable ISSN (Electronic)',
'UUID',
# '18 Created',
# "33.1 Keywords in 'Open Access classification'[1]"
]]
admitted_types = ['Chapter in Book / Report / Conference proceeding - Chapter',
'Contribution to Journal - Article',
'Contribution to Conference - Paper',
'Book / Report - Report',
'Book / Report - Book',
'Chapter in Book / Report / Conference proceeding - Conference contribution',
'Contribution to Journal - Review article',
] ## OVERWRITES LATER
# I will play safe for now, can always post-filter it
accepted_amsco_types_sample = ['Contribution to journal - Article',
'Chapter in Book/Report/Conference proceeding - Chapter',
'Chapter in Book/Report/Conference proceeding - Foreword/postscript',
'Book/Report - Book',
'Contribution to journal - Review article',
###'Contribution to journal - Comment/Letter to the editor',
#'Thesis - Thesis: Research University of Amsterdam, graduation University of Amsterdam',
'Book/Report - Report',
#'Non-textual form - Web publication/site',
#'Book/Report - Book editing',
#'Thesis - Thesis: Research external, graduation external',
'Contribution to journal - Editorial',
'Chapter in Book/Report/Conference proceeding - Conference contribution',
#'Book/Report - Inaugural speech',
#'Working paper - Working paper',
'Contribution to conference - Paper',
'Contribution to conference - Abstract',
# 'Case note - Case note',
'Contribution to journal - Meeting Abstract',
'Contribution to journal - Book/Film/Article review',
#'Contribution to conference - Poster',
'Contribution to journal - Special issue',
###'Contribution to journal - Erratum/Corrigendum',
#'Non-textual form - Exhibition',
'Chapter in Book/Report/Conference proceeding - Entry for encyclopedia/dictionary',
#'Thesis - Thesis: Research University of Amsterdam, graduation external',
'Contribution to journal - Letter',
'Contribution to journal - Short survey',
#'Book/Report - Valedictory speech',
#'Contribution to journal - Literature review (NOT USED)',
#'Thesis - Thesis: Research external, graduation University of Amsterdam',
#'Non-textual form - Digital or Visual Products'
]
admitted_types = ['Chapter in Book / Report / Conference proceeding - Chapter',
'Contribution to Journal - Article',
'Contribution to Conference - Paper',
'Book / Report - Report',
'Book / Report - Book',
'Chapter in Book / Report / Conference proceeding - Conference contribution',
'Contribution to Journal - Review article',
] + accepted_amsco_types_sample
# pre-processing
#
# some robustness needed... some asserts too
#
admitted_types_lower = pd.DataFrame(admitted_types)[0].str.lower().to_list()
print('pure unprocessed has this many rows: ' + str(len(df)))
df = df[df['Type'].str.lower().isin(admitted_types_lower)]
print('pure processed has this many rows: ' + str(len(df)))
###df = df[df['Type'].isin(admitted_types)]
df['DOI'] = df['Electronic version(s) of this work > DOI (Digital Object Identifier)[1]']
# add unpaywall info
#
ae = api_extractor(max_num_workers=16) # care: not tested sufficiently, may give too many error returns
if test_mode_upw:
ae.feed_scopus_search_info(df_in=df.iloc[0:1,:], do_overwrite=True) # 0:1 saves 15sec wait per year of data
df_res_upw, _ = ae.extract(use_multi_thread=False, skip_scopus_search=True, skip_unpaywall=False,
use_parallel_apis=False)
else:
print('multithread is not used')
ae.feed_scopus_search_info(df_in=df, do_overwrite=True)
df_res_upw, _ = ae.extract(use_multi_thread=False, skip_scopus_search=True, skip_unpaywall=False,
use_parallel_apis=False)
#
# merge back in with orig_doi no nans
# cleaning is done in the import framework, saving us work and duplicate code : )
# ! Not sure if dois in pure have an error, causing a mismatch with scopus and unpaywall
print(list(df_res_upw))
print(df_res_upw.head(1))
df = df.merge(df_res_upw, left_on = 'DOI', right_on = 'orig_doi', how = 'left')
df['upw_oa_color_verbose'] = df['upw_oa_color'].apply(lambda x: 'unknown' if x is np.nan else x)
###df_m['pure_oa_class_verbose'] = df_m["33.1 Keywords in 'Open Access classification'[1]"].apply(lambda x: 'unknown' if x is np.nan else x)
# add faculty_finder info exploiting pure org columns
#
ff = faculty_finder(organizational_chart=org_info)
#
#
if silent is False:
trysize = 100
start = time.time()
df.loc[0:trysize,"Organisations > Organisational unit[1]"].apply(lambda x: ff.match(x))
end = time.time()
print(end-start)
print('that was time for 100 entries, but total df is: ')
print(len(df))
print('now doing all of em')
print('this will probably take ' + str(float(len(df))/trysize*(end-start)) + ' seconds')
#
#
df['ff'] = df.loc[:,"Organisations > Organisational unit[1]"].apply(lambda x: ff.match(x))
df.loc[:, 'ff_provided_organization_string'] = df.ff.apply(lambda x: x['ff_provided_organization_string'])
df.loc[:, 'ff_match'] = df.ff.apply(lambda x: x['ff_match'])
df.loc[:, 'ff_score'] = df.ff.apply(lambda x: x['ff_score'])
df.loc[:, 'ff_terms'] = df.ff.apply(lambda x: x['ff_terms'])
df.loc[:, 'ff_message'] = df.ff.apply(lambda x: x['ff_message'])
df.loc[:, 'ff_match_subgroup'] = df.ff.apply(lambda x: x['ff_match_subgroup'])
#
# evaluation is in pure_integratie.ipnyb
# for completeness, I also want ff_match based on org_info
# extra processing
df['DOI_isnull'] = df.DOI.isnull()
df['pub_uuid'] = df['UUID']
# now save
if do_save:
df.to_csv(path_to_save)
return df
def get_eid_uuid_data(host, database, user, pw, silent=False):
"""
This function obtains the EID<>PURE_PUB_UUID table from our extrapure database
It immediately works for all years at once
:param host: host database (IP)
:param database: database name
:param user: user to log into database with
:param pw: password to log into database with
:param silent: whether you want to silence extra prints or not
:return: 1 a dataframe with 2 columns as EID<>PURE_PUB_UUID table if success, otherwise just None
2 a boolean which is True iff success otherwise False
"""
try:
connection = mysql.connector.connect(host=host,
database=database,
user=user,
password=pw)
sql_select_Query = "select * from scopus_has_publication"
cursor = connection.cursor()
cursor.execute(sql_select_Query)
records = cursor.fetchall()
df_t = pd.DataFrame(records).rename(columns={0: 'eid', 1: 'pub_uuid'})
if silent is False:
print("Total number of rows is: ", cursor.rowcount)
success = True
except Error as e:
#always print this, later also add logging
print("Error reading data from MySQL table", e)
print('returning None')
df_t = None
success = False
finally:
if (connection.is_connected()):
connection.close()
cursor.close()
if silent is False:
print("MySQL connection is closed")
return df_t, success
def fn_cats(row):
if row == 'closed':
result = 1
elif row == 'hybrid':
result = 2
elif row == 'bronze':
result = 3
elif row == 'green':
result = 4
elif row == 'gold':
result = 5
else:
result = 0 # nans etc
return result
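# Illustration of the ordering produced by fn_cats (made-up input list):
def _example_fn_cats():
    return [fn_cats(x) for x in ['gold', 'green', 'bronze', 'hybrid', 'closed', np.nan]]  # -> [5, 4, 3, 2, 1, 0]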
def left_pad(my_str):
if len(my_str) < 2:
return '0' + my_str
else:
return my_str
def get_today():
return str(datetime.now().year) + '-' + left_pad(str(datetime.now().month)) + '-' + left_pad(
str(datetime.now().day))
def get_today_for_pubdatetxt():
return left_pad(calendar.month_name[datetime.now().month]) + ' ' + str(datetime.now().year)
def get_today_for_pubdatetxt_integers(year, month):
return left_pad(calendar.month_name[month]) + ' ' + str(year)
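# Example of the PUBDATETXT-style strings these helpers produce (fixed inputs, so deterministic):
def _example_pubdatetxt_formatting():
    return get_today_for_pubdatetxt_integers(2021, 1)  # -> 'January 2021'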
def get_today_for_pubdatetxt_super(months_back=0):
# remove datetime. later
# dt_obj = datetime.datetime.now() - datetime.timedelta(days=datetime.datetime.now().day)
if months_back == 0:
dt_obj = datetime.now()
else:
cntr = months_back
dt_obj = datetime.now()
while cntr > 0:
dt_obj = dt_obj - timedelta(days=dt_obj.day)
#print(dt_obj)
cntr -= 1
return left_pad(calendar.month_name[dt_obj.month]) + ' ' + str(dt_obj.year)
def make_types_native_basic(lst):
res = []
for ii in lst:
#print(type(ii))
        if isinstance(ii, (np.int32, np.int64)):
#print('aa')
res.append(int(ii))
else:
res.append(ii)
return res
def add_abstract_to_scopus(start_path,
year,
do_save_csv=True):
"""
Combines the scopus pickle and old scopus csv into a new one with cleaned abstract text
:param start_path: the starting path where all input/output goes. Subdirectories are required.
this function requires the subfolder:
- 'scopus_processed' with 'pickle_OA_VU'+year+'_met_corresponding_authors.pkl' and for every year
'knip_OA_VU'+year+'_met_corresponding_authors.csv'
:param do_save_csv: whether you want to output a csv or not (will overwrite)
:return: Nothing
"""
#
# get scopus pkl file
print('started reading a pickle ')
df_pickle = pd.read_pickle(start_path + '/scopus_processed/pickle_OA_VU' \
+ str(year) + '_met_corresponding_authors.pkl')
print('finished reading a pickle ')
# make abstract text and clean it
df_pickle['abstract_text'] = df_pickle.apply(get_abstract_if_any, axis=1)
df_pickle['abstract_text_clean'] = (df_pickle['abstract_text']
.apply(comma_space_fix)
.apply(remove_punctuation))
df_pickle = df_pickle[['eid', 'abstract_text_clean']]
if ((len(df_pickle[df_pickle.eid.isnull()]) > 0)
| (df_pickle.eid.apply(lambda x: x is None).max())
| (df_pickle.eid.apply(lambda x: x == 'None').max())):
print('merge issue: df_pickle for abstract text has some null eids')
#
# read scopus
df_k = pd.read_csv(start_path + '/scopus_processed/knip_OA_VU' + str(
year) + '_met_corresponding_authors.csv')
#
# merge with scopus
df_m = df_k.merge(df_pickle[['eid', 'abstract_text_clean']], on='eid', how='left')
if len(df_m) != len(df_k):
print('len messed up')
#
# save it
if do_save_csv:
df_m.to_csv(start_path + '/scopus_processed/knip_OA_VU' \
+ str(year) + '_met_abstract_tekst.csv')
return None
def merge_pure_with_scopus_data(df_p, df_s, df_t):
"""
    This function merges a pre-processed Pure dataframe with a pre-processed Scopus dataframe, and also uses extrapure
It is a mega-merge using EID, DOI and title with advanced rule sets. Soft-title-match is not included.
There is room for improvement: a doi-cleaner would be nice like '10.' start for all entries
This function is year_range-indifferent and will work with any year-range or period-range
:param df_p: Dataframe from pure, must be preprocessed with pre_process_pure_data()
:param df_s: Dataframe from scopus, must be enriched through open-access-pipeline (! not yet in Pycharm !)
:param df_t: Dataframe from xpure with eid to uuid. Careful with UUID: every PURE repo has different uuids.
:return: df_combined (the merged dataframe including merge_source), diagnostics (is None right now)
"""
# we need to clean the dois otherwise the doi merge will fail
# I am going to take a small risk and do a overwrite...
df_p_backup = df_p.copy()
df_s_backup = df_s.copy()
df_p['DOI'] = df_p.DOI.apply(lambda x: x.replace('https://doi.org/', '') if pd.notnull(x) else x)
df_s['doi'] = df_s.doi.apply(lambda x: x.replace('https://doi.org/', '') if pd.notnull(x) else x)
# 1. use df_t to enrich df_p with eids, continue with df_m
df_m = df_p.merge(df_t, left_on='pub_uuid', right_on='pub_uuid', how='left')
df_m['has_eid'] = ~df_m.eid.isnull()
    if len(df_m[df_m['Title of the contribution in original language'].isnull()]) > 0:
        print('there were records with empty titles and those will be discarded')
# 2. de-duplicate left=pure and right=scopus
# 2A. de-dupe for eids
# assumption: last duplicate entry is correct, rest is false
# we need to preserve records which have NaNs in their eids
# plan of attack: split part with eid, de-dupe it w/o worrying about nan eids, then re-append the part w/o eid
df_m = df_m[df_m.eid.isnull()].append(df_m[~df_m.eid.isnull()].drop_duplicates(subset=['eid'], keep='last'))
if df_m[~df_m.eid.isnull()].eid.value_counts().max() != 1:
print('eid de-duplication failed somehow, you can ignore this if you used AMSCO-data')
# 2B. de-duplicate on DOI
    # some are marked as 'do_not_merge_on_DOI', which is an advanced feature
# assumptions:
# step 1. all records with double DOI except for books and book chapters: keep=last, drop other records
# step 2. all records with double DOI and =book or =bookchapter: add a flag to not merge on DOI at all,
# keep rest as is so we can unpaywall it later
#
# prepare support variables
doi_counts = df_m[~df_m.DOI.isnull()].DOI.value_counts().sort_values(ascending=False)
double_doi = doi_counts[doi_counts > 1].index.to_list() # for future use or to mail Reinout or whatever
df_m['type_contains_book'] = df_m.Type.str.lower().str.contains('book')
#
# step 1: drop some of the DOI duplicates (see step 1/2 disc above)
df_m = (df_m[(~df_m.DOI.isin(double_doi)) | (df_m.type_contains_book)]
.append(df_m[(df_m.DOI.isin(double_doi)) & (~df_m.type_contains_book)]
.drop_duplicates(subset='DOI', keep='last')))
#
# step 2: prepare 'do_not_merge_on_DOI' tag
###df_m['do_not_merge_on_DOI'] = ((df_m.DOI.isin(double_doi)) & (df_m.type_contains_book))
#
df_m['do_not_merge_on_DOI'] = (df_m.DOI.isin(double_doi))
#
doi_counts = df_m[~df_m.DOI.isnull()].DOI.value_counts().sort_values(ascending=False)
double_doi = doi_counts[doi_counts > 1].index.to_list() # for future use or to mail Reinout or whatever
if df_m[df_m.DOI.isin(double_doi)].do_not_merge_on_DOI.mean() != 1:
print('doi de-duplication failed somehow')
# this sometimes happens due to doi-https-cleaning??? No
# this happens when there are book-types with duplicate dois: a rare happening, and it will mess up stuff
# why: you will get issues during merging
# proposed solution: if two different bookparts have the same doi, do not merge on doi at all
# that is the safest, as STM has a good chance of post-fixing it.
# but do not delete those records though, just do not merge on doi (they are different pieces after all)
# 2C. de-duplicate on titles
#
# drop records where there are more than 1 word in the title (where title duplicate)
# where there is 1 word in the title, we cannot drop, and we should not merge either, so isolate those
# like 'introduction' can be title-dupe, of course, and still be a unique article
#
# this is a hard choice, but it is probably best to remove dupes and add flags before any merge happens,
# in order to avoid having dupes with different eids appear twice in merged and unmerged form
# the total affected records are 0.7% and the chance on a missing merge is even smaller
    # this is an assumption: we assume the kept dupes are the correct and best ones here
#
# helper variables
df_double_titles = df_m['Title of the contribution in original language'].value_counts()
double_titles = df_double_titles[df_double_titles > 1].index.to_list()
#
# btw: these are exclusive sets, any record can belong to maximally one of these two groups
df_m['is_dupe_based_on_long_title_dupe'] = (
(df_m['Title of the contribution in original language'].isin(double_titles))
& (df_m['Title of the contribution in original language'].str.split().str.len() > 1))
df_m['do_not_merge_on_title'] = ((df_m['Title of the contribution in original language'].isin(double_titles))
& (df_m[
'Title of the contribution in original language'].str.split().str.len() == 1))
#
# now we need to remove dupes
# split into two, drop dupes, then combine back
df_m = (df_m[df_m['is_dupe_based_on_long_title_dupe']]
.drop_duplicates(subset=['Title of the contribution in original language'], keep='last')
.append(df_m[~df_m['is_dupe_based_on_long_title_dupe']]))
#
# end of de-duplication and tagging 'do_not_merge_on_DOI' and 'do_not_merge_on_title'
# 3. Perform the mega-merge
#
    # drop where title is empty (count first, then drop, so the diagnostic print is meaningful)
    n_no_title = len(df_m[df_m['Title of the contribution in original language'].isnull()])
    df_m = df_m[~df_m['Title of the contribution in original language'].isnull()]
    if n_no_title > 0:
        print('dropped ' + str(n_no_title) + ' records for no title present')
#
# all variables of step 1
#
# first part of pure with eid
df_A = df_m[~df_m.eid.isnull()]
df_BC = df_m[df_m.eid.isnull()]
#
# inner-merged part of A and Scopus
df_Amerged_SA = df_A.merge(df_s, on='eid', how='inner')
#
# find out which eids were merged on
merged_eids = set(df_Amerged_SA.eid.unique())
# merged parts of left and right
df_Amerged = df_A[df_A.eid.isin(merged_eids)]
df_SA = df_s[
df_s.eid.isin(merged_eids)] # remember we de-duplicated for eids, dois and titles, therefore this should work
# unmerged parts left and right
df_Aunmerged = df_A[~df_A.eid.isin(merged_eids)]
df_Sunmerged1 = df_s[~df_s.eid.isin(merged_eids)]
#
# reflux df_Aunmerged
df_BC_Aunmerged = df_BC.append(df_Aunmerged)
#
# all variables of step 2
# do respect 'do_not_merge_on_DOI'
#
# grab from PURE table the B, the C and the Aunmerged parts only
# do not grab Amerged because we do not want to merge the merged parts again ever
# from these parts, isolate the parts which fulfill the two conditions: has DOI and has no flag to not merge on DOI
# these should be attempted to merge on DOI with Scopus (again, do not merge twice, use Sunmerged1 for this)
# after the merge can obtain the DOIs that merged and use that to split Bmerged and Bunmerged
# notice that there is a difference with the initial plan: Bunmerged will not contain do_not_merge_on_DOI-set at all
# To reduce complexity and adhere to the original plan, we will append the do_not_merge_on_DOI-set to Bunmerged
#
# also, df_BC_Aunmerged splits up in 3 parts
    # first we cut off the do_not_merge_on_DOI part
# then we cut the rest in two: one part without DOI and one part with DOI
# this last part is the merge_candidate for step 2/B
df_merge_candidate_B = df_BC_Aunmerged[(~df_BC_Aunmerged.DOI.isnull()) & (~df_BC_Aunmerged.do_not_merge_on_DOI)]
df_BC_Aunmerged_wo_DOI_may_merge = df_BC_Aunmerged[
(df_BC_Aunmerged.DOI.isnull()) & (~df_BC_Aunmerged.do_not_merge_on_DOI)]
df_do_not_merge_on_DOI = df_BC_Aunmerged[df_BC_Aunmerged.do_not_merge_on_DOI]
#
# merge
# assumption: we assume flat doi merge is perfect (we do not lowercase or clean starts or anything)
# diagnostics: this merges 15 out of 328 pure entries with DOI
# lowercasing only affects 20% roughly, but merge stays at 15
# 8 records in total have start different than '10.'
# I will leave it as uncleaned doi-merging here because the added value is very small
df_Bmerged_SB = df_merge_candidate_B.merge(df_Sunmerged1, left_on='DOI', right_on='doi', how='inner')
#
# find out which dois were merged on
merged_dois = set(df_Bmerged_SB.DOI.unique())
# merged parts of left and right
df_Bmerged = df_merge_candidate_B[df_merge_candidate_B.DOI.isin(merged_dois)]
df_SB = df_Sunmerged1[df_Sunmerged1.doi.isin(merged_dois)]
# unmerged parts left and right
df_Bunmerged_temp = df_merge_candidate_B[~df_merge_candidate_B.DOI.isin(merged_dois)]
df_Sunmerged2 = df_Sunmerged1[~df_Sunmerged1.doi.isin(merged_dois)]
#
# append the do_not_merge_on_DOI-set to Bunmerged afterwards
# remember to add the do_not_merge_on_DOI set to df_Bunmerged
# notice that defining every part explicitly makes this less difficult
df_Bunmerged = df_Bunmerged_temp.append(df_do_not_merge_on_DOI)
#
# info:
# in step 2 the unmerged parts together were df_BC_Aunmerged
# we split that now into:
# 1. df_do_not_merge_on_DOI
# 2. df_BC_Aunmerged_wo_DOI_may_merge
# 3. df_merge_candidate_B, which consists of df_Bmerged and df_Bunmerged_temp
# Also, df_Bunmerged is basically df_Bunmerged_temp + df_do_not_merge_on_DOI
#
# so what will be the unmerged part for the next step then?
# df_do_not_merge_on_DOI + df_BC_Aunmerged_wo_DOI_may_merge + df_Bunmerged_temp
# or equivalently:
# df_Bunmerged + df_BC_Aunmerged_wo_DOI_may_merge
# or equivalently:
# the unmerged set of the next step is the unmerged set of this step, minus df_Bmerged because that part merged
    # but we'd rather append than 'subtract' so we build it up as (in reflux formulation):
#
# unmerged part for the next step = df_BC_Aunmerged_wo_DOI_may_merge + df_Bunmerged
# verified logically a few times now, let's continue
#
# reflux df_Bunmerged
df_C_Bunmerged = df_BC_Aunmerged_wo_DOI_may_merge.append(df_Bunmerged)
#
# all variables of step 3
# do respect 'do_not_merge_on_title'
#
# the unmerged set is exactly df_C_Bunmerged
# but not everything is merge candidate
# we have to isolate the do_not_merge_on_title set
df_do_not_merge_on_title = df_C_Bunmerged[df_C_Bunmerged.do_not_merge_on_title]
df_merge_candidate_C = df_C_Bunmerged[~df_C_Bunmerged.do_not_merge_on_title]
# notice that we do not split into whether title is present, because title-less records were discarded (0 in 2018)
#
# now we have to try to merge on title
# first we do an exact match merge,
# for the rest we evaluate the levenshtein distance
    # exploration indicated that we expect very favourable 0/1 splits and no gray zone, but let's try it out
#
# first exact match on title
df_Cmerged_SC_exact = df_merge_candidate_C.merge(df_Sunmerged2,
left_on='Title of the contribution in original language',
right_on='title',
how='inner')
# now split merged, unmerged and do_not_merge
# find out which eids were merged on
merged_titles = set(df_Cmerged_SC_exact.title.unique())
# merged parts of left and right
df_Cmerged = df_merge_candidate_C[
df_merge_candidate_C['Title of the contribution in original language'].isin(merged_titles)]
df_SC = df_Sunmerged2[df_Sunmerged2.title.isin(merged_titles)]
# unmerged parts left and right
df_Cunmerged_temp = df_merge_candidate_C[
~df_merge_candidate_C['Title of the contribution in original language'].isin(merged_titles)]
df_Sunmerged3 = df_Sunmerged2[~df_Sunmerged2.title.isin(merged_titles)]
# and we have the do_not_merge_on_title set ready, do not forget, better add it now
df_Cunmerged = df_Cunmerged_temp.append(df_do_not_merge_on_title)
#
#
# This is without soft-title-matching!
# generate resulting combined table (name it SP)
# ! careful! you cant just add stuff, we absorbed Aunmerged for example!
# first append cols to unmerged parts
if len(df_Amerged_SA) > 0:
df_Amerged_SA.loc[:, 'merge_source'] = 'both'
else:
df_Amerged_SA['merge_source'] = None
df_Bmerged_SB.loc[:, 'merge_source'] = 'both'
df_Cmerged_SC_exact.loc[:, 'merge_source'] = 'both'
df_Cunmerged.loc[:, 'merge_source'] = 'pure'
df_Sunmerged3.loc[:, 'merge_source'] = 'scopus'
df_combined = (df_Amerged_SA
.append(df_Bmerged_SB, sort=False)
.append(df_Cmerged_SC_exact, sort=False)
.append(df_Cunmerged, sort=False)
.append(df_Sunmerged3, sort=False))
diagnostics = None
return df_combined, diagnostics
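# Generic sketch of one step of the merge cascade above: inner-merge on a key, then split both
# sides into merged and unmerged parts so the unmerged left part can 'reflux' into the next step.
# The key column names are whatever the caller passes in.
def _example_merge_cascade_step(df_left, df_right, left_key, right_key):
    merged = df_left.merge(df_right, left_on=left_key, right_on=right_key, how='inner')
    merged_keys = set(merged[left_key].unique())
    left_unmerged = df_left[~df_left[left_key].isin(merged_keys)]
    right_unmerged = df_right[~df_right[right_key].isin(merged_keys)]
    return merged, left_unmerged, right_unmerged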
def prepare_combined_data(start_path,
year_range,
xpure_pack,
add_abstract=True,
skip_preprocessing_pure_instead_load_cache=False, # safe
remove_ultra_rare_class_other=True,
                          org_info=None):  # defaults to the VU organogram Excel, loaded lazily below
"""
This function prepares the combined data for a chosen year_range
The raw pure files and processed scopus files per year should be available
Next step: test this function!
Remember that you must do a fresh run if you want any different year range !
In no way can the results be stacked across different executions of this function (including any soft-title-match)
Because otherwise you will introduce duplicates with that stacking
:param start_path: the starting path where all input/output goes. Subdirectories are required.
this function requires the subfolder:
- 'scopus_processed' with 'pickle_OA_VU'+year+'_met_corresponding_authors.pkl' and for every year
'knip_OA_VU'+year+'_met_corresponding_authors.csv'
in year_range
-
:param year_range:
:param add_abstract:
:param remove_ultra_rare_class_other:
:param skip_preprocessing_pure_instead_load_cache:
:return:
"""
    # 0. load the default organogram lazily (avoids reading the Excel at import time) and unpack xpure settings
    if org_info is None:
        org_info = pd.read_excel(static.PATH_START + 'raw data algemeen/vu_organogram_2.xlsx', skiprows=0)
    [host, database, user, pw] = xpure_pack
# 1. prepare helper variables
# 1A. wrap immutable parameters
year_range = list(year_range)
# 1B. load xpure user/pass
#host = pd.read_csv(path_pw + '/password_xpure.csv').host[0]
#database = pd.read_csv(path_pw + '/password_xpure.csv').database[0]
#user = pd.read_csv(path_pw + '/password_xpure.csv').user[0]
#pw = pd.read_csv(path_pw + '/password_xpure.csv').pw[0]
# 2. add abstract
if add_abstract:
# add the abstract and set scopus_variant to use this enriched csv
scopus_variant = '_met_abstract_tekst.csv'
for year in year_range:
add_abstract_to_scopus(start_path, year) # verified: safe for per-year run (scopus<>scopus only)
else:
# do not add an abstract and use the original csv
scopus_variant = '_met_corresponding_authors.csv'
print('start 3')
# 3. Obtain df_combined for a single year
# includes obtaining processed pure, scopus and xpure data, then merging it and saving csvs
df_p_multi_year = pd.DataFrame()
df_s_multi_year = pd.DataFrame()
# df_t is always multi-year
for year in year_range:
path_pure_unprocessed = start_path + '/pure_raw/vu' + str(year) + '_public_raw.xls'
path_scopus = start_path + '/scopus_processed/knip_OA_VU' + str(year) + scopus_variant
path_to_save_or_load_processed_pure = start_path + '/pure_processed/processed_pure' + str(year) + '.csv'
# 3.1: get processed pure data
# pre-process the pure data or load a cache
if skip_preprocessing_pure_instead_load_cache:
# load processed pure in directly
df_p = pd.read_csv(path_to_save_or_load_processed_pure)
else:
# load in unprocessed pure, process it, save it, read it
df_p_unprocessed = pd.read_excel(path_pure_unprocessed)
df_p = pre_process_pure_data(df=df_p_unprocessed,
org_info=org_info,
path_to_save=start_path + '/pure_processed/processed_pure' + str(year) + '.csv',
test_mode_upw=True, # True avoids waste since our enriched scopus has it too
do_save=True) # just always save
# 3.2: get processed scopus data
df_s = pd.read_csv(path_scopus)
#
# append to stack years
df_p_multi_year = df_p_multi_year.append(df_p, ignore_index=True)
df_s_multi_year = df_s_multi_year.append(df_s, ignore_index=True)
# these parts are multi_year
# 3.1&3.2 extra: reset indices and append year columns where necessary
df_p_multi_year = df_p_multi_year.reset_index(drop=True)
df_s_multi_year = df_s_multi_year.reset_index(drop=True)
df_s_multi_year['scopus_year'] = df_s_multi_year.year
if np.min(year_range) >= 2000:
df_p_multi_year = add_pure_year(df_p_multi_year, date_col='Current publication status > Date')
else:
print('violation of the post-2000 assumption for using pure year information')
df_p_multi_year = add_pure_year(df_p_multi_year, date_col=None)
# 3.3: get xpure data
df_t, success_df_t = get_eid_uuid_data(host, database, user, pw, silent=False)
# 3.4: run the merger for all years at once to avoid the cross-year issue where scopus and pure have different years
df_combined, diagnostics_merger = merge_pure_with_scopus_data(df_p_multi_year, df_s_multi_year, df_t)
# 3.5: prepare identifiers for STM to back-merge on... put this higher up please
df_combined['post_merge_id'] = 1
df_combined['post_merge_id'] = df_combined['post_merge_id'].cumsum()
# this post_merge_id also lives forth in df_chosen_year and the unmerged csvs, so you can use it for STM backmerge
# 4. remove rare classes if desired
if remove_ultra_rare_class_other:
df_combined = df_combined[
df_combined.ff_match != 'VU - Other Units'] # prevent issues with a brand new ultra-rare class please
# overwrite it
df_combined.to_csv(start_path + '/merged_data/df_total.csv')
df_combined.to_pickle(start_path + '/merged_data/df_total.pkl')
print('start 5')
# 5: save the full data
df_combined.to_csv(start_path +
'/merged_data/df_total.csv')
df_combined.to_pickle(start_path +
'/merged_data/df_total.pkl')
# 6. return the verified middle year (which does not suffer from cross-year issue)
# Remember that you must do a fresh run if you want any different year range !
# how do we filter df_combined?
# P+S and S rows: filter on scopus_year
# P rows: filter on pure_year
# this is safe as long as you only do this with a single df_combined for any slice you want
# why?
# the df_combined is by assumption duplicate-free. All duplicates of raw-P and raw-S are removed,
# and then they are merged and again duplicates are removed over the rows.
# Because all P+S&S rows have exactly only 1 year,
# and the P rows have exactly only 1 year as well
# so any proper slice is safe as you don't have anything double if you slice over years and stack again
# however, you should not involve a second df_combined as the paper may merge in one df_combined and remain
# unmerged in another df_combined due to a different year_range, and subsequently get a different year to slice on
# like scopus_year in one and pure_year in another
# this is an intricate detail, so please avoid such a merge and just re-run or else the data will be dirty and you
# will not notice at all probably
for chosen_year in year_range[1:-1]: # drop edges regardless, not checking if last year is last (due future papers!)
df_chosen_year = (df_combined[(df_combined.merge_source == 'pure')
&
(df_combined.pure_year == chosen_year)]
.append(df_combined[(df_combined.merge_source != 'pure')
&
(df_combined.scopus_year == chosen_year)]
)
)
df_chosen_year.to_pickle(start_path + '/merged_data/df_total' + str(chosen_year) + '.pkl')
df_chosen_year.to_csv(start_path + '/merged_data/df_total' + str(chosen_year) + '.csv')
# 7. isolate unmerged for soft-title-matching: [ notice we do this post-everything to allow early-access-data]
# df_unmerged = df_combined[(df_combined.merge_source != 'both')]
# df_unmerged.to_csv(start_path + '/merged_data/df_unmerged.csv')
df_unmerged_pure = df_combined[df_combined.merge_source == 'pure']
df_unmerged_scopus = df_combined[df_combined.merge_source == 'scopus']
# save to csv
df_unmerged_pure.to_csv(start_path + '/df_unmerged_pure.csv') # equivalent to df_combined/ms=pure
df_unmerged_scopus.to_csv(start_path + '/df_unmerged_scopus.csv') # equivalent to df_combined/ms=scopus
# 8. you can now run STM with its current settings
#
# I am not sure how to deal with the multiprocessing aspect and hardcode entry
# 1. config and run prepare_combined_data to get triple-merge
# 2. config and run nlp2
# 3. config and run incorporate_stm_results
return df_chosen_year
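# Sketch of the intended three-step flow described above; the path, year range and xpure
# credentials are placeholders, and nlp2 / incorporate_stm_results live outside this module.
def _example_full_pipeline_driver():
    xpure_pack = ['HOST', 'DATABASE', 'USER', 'PASSWORD']
    df_chosen_year = prepare_combined_data(start_path='./oa_data',
                                           year_range=range(2017, 2020),
                                           xpure_pack=xpure_pack,
                                           add_abstract=True)
    # step 2 (nlp2) and step 3 (incorporate_stm_results) follow in their own scripts
    return df_chosen_year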
def get_altmetric(row, col='cited_by_policies_count'):
"""
legacy code, please use crystal_altmetric() or its aliases instead
Returns Altmetric's cited_by_policies_count for a given doi
! There is no internal cleaning yet
If DOI is empty or altmetric returns None, then function returns np.nan
If Altmetric returns non-empty but cited_by_policies_count is missing,
then the function returns 0
else returns the cited_by_policies_count
In: DOI and col[functionality missing]
Out: single value with either np.nan, 0, or a positive integer
"""
if col != 'cited_by_policies_count':
print('functionality missing for other columns, giving policies now')
    if not(pd.notnull(row)):
        return np.nan  # missing DOI -> np.nan, as described in the docstring
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import re
import os
def get_plot_data(path, span=100):
df = pd.DataFrame()
with open(path + 'test.txt') as file:
data = pd.read_csv(file, index_col=None)
df = df.append(data, ignore_index=True)
df['r'] = df['r'].ewm(span=span).mean()
return df
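# Illustrative helper (made-up path) showing how the smoothed reward column is typically plotted:
def _example_plot_rewards(path='./res/example-run/', span=100):
    df = get_plot_data(path, span=span)
    plt.plot(df['r'])
    plt.xlabel('episode')
    plt.ylabel('smoothed reward')
    return df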
i = 4
TIMESTEP = 1e6
NSAMPLE = 1e4
GAMES = ['Breakout', 'Seaquest', 'Pong', 'MontezumaRevenge', 'BitFlip']
YMAXS = [600, 2000, 5000, 1, 1, 6000, 17000, 1, 1]
METHODS = ['dqn', 'her-dqn']
res_dir = './res/'
files = os.listdir(res_dir)
sample_list = np.arange(0, TIMESTEP, TIMESTEP/NSAMPLE, dtype=int)  # np.int is deprecated; plain int is equivalent
df = pd.DataFrame()
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
        tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from math import sqrt
class portfolio:
'''
The universe and the valid testing period will be defined by the price data.
'''
def __init__(self, weight=None, share=None, benchmark=None, end_date=None, name='Portfolio', benchmark_name='Benchmark', price=None, trading_status=None):
'''
weight: a df with row-names date, col-name security id, value the portfolio weight (not necessarily normalized) of col-security at row-date.
share: a df with row-names date, col-name security id, value the portfolio shares of col-security at row date.
benchmark: a df of benchmark weight or a portfolio object
end_date: date to end backtest
name: the name of the portfolio
benchmark_name: the name of the benchmark
'''
# Price and trading status:
if price is not None:
self.set_price(price, trading_status)
# Construct a portfolio from weight or share:
if weight is not None:
self._weight = weight
self.normalized = False
elif share is not None:
self.share = share
self._weight = self.weight_from_share(share)
else:
raise TypeError('Input at least one of weight or share')
self._end_date = end_date
self.name = name
# Setting benchmark from weight df or portfolio object:
if benchmark is None:
self.benchmark = None
else:
self.set_benchmark(benchmark, benchmark_name)
def set_benchmark(self, benchmark, benchmark_name='Benchmark'):
if isinstance(benchmark, pd.DataFrame):
self.benchmark = portfolio(
weight=benchmark,
name=benchmark_name,
end_date=self.end_date,
price=self.price,
trading_status=self.trading_status
)
elif isinstance(benchmark, portfolio):
self.benchmark = benchmark
self.benchmark.set_price(self.price, self.trading_status)
self.benchmark.end_date= self.end_date
else:
raise TypeError('Unkown benchmark!')
def set_price(self, price, trading_status=None):
'''
price_data: a df with row-names date, col-name security id, value the price of col-security at row-date.
trading_status: a df with row-names date, col-name security id, boolean value indicate if col-security is tradable at row-date.
'''
# Price and trading status is const, should not be change once set.
self.__price = price
if trading_status is None:
self.__trading_status = self.__price.notnull()
else:
trading_status = self._adjust(trading_status)
self.__trading_status = self.__price.notnull() & trading_status
@property
def price(self):
return self.__price
@property
def trading_status(self):
return self.__trading_status
# Utility function to align df with price:
def _adjust(self, df):
assert self.__price is not None, "No price data!"
# Adjust index(dates) withing price.index
out_range_date = df.index.difference(self.__price.index)
if len(out_range_date)>0:
print(f'Skipping outrange dates:\n{[d.strftime("%Y-%m-%d") for d in out_range_date]}')
df = df.loc[df.index & self.__price.index, :]
# Adjust columns(tickers) withing price.columns,
unknown_ticker = df.columns.difference(self.__price.columns)
if len(unknown_ticker)>0:
print(f'Removing unkown tickers:\n{unknown_ticker.values}')
df = df.loc[:, df.columns & self.__price.columns]
return df
@property
def weight(self):
assert self.__price is not None, 'No price data!'
# Normalization process:
if not self.normalized:
self._weight = self._adjust(self._weight)
self._weight = self._weight.where(self.trading_status, other = 0) # Set weight 0 if trading status is false
self._weight = self._weight.divide(self._weight.sum(axis=1), axis=0) # Normalization
self._weight = self._weight.dropna(how='all') # Drop rows with sum==0.
self.normalized= True
return self._weight
def weight_from_share(self, share):
share = self._adjust(share)
price_data = self.__price.copy().loc[share.index, share.columns]
self._weight = self.share * price_data
self.normalized = False
return self.weight
@property
def end_date(self):
if self._end_date is None:
assert self.__price is not None, 'No price data!'
self._end_date = max(self.__price.index)
return self._end_date
@end_date.setter
def end_date(self, value):
self._end_date = value
##################### Backtesting Calculations ####################
@property
def daily_ret(self):
try:
return self._daily_ret
except AttributeError:
self._daily_ret = np.log(self.__price.ffill()/self.__price.ffill().shift(1))
return self._daily_ret
def _drift_weight(self, initial_weight, rebalanced_weight=None, end=None):
'''
initial_weight: weight before rebalance with shape (1, n)
rebalanced_weight: weight after rebalance with shape (1, n), same index as initial weight.
end: end date of the drifting period.
'''
# Prepare end of drifting period:
if end is None:
end = self.end_date
elif end > self.end_date:
print(f'Invalid end date, set to {self.end_date} (portfolio end date)!')
end = self.end_date
###################### Rebalance ########################
# Prepare the initial and rebalanced weight:
assert initial_weight.shape[0]==1, 'Input weight with shape (1,n)'
initial_weight_sum = initial_weight.iloc[0, :].sum()
if initial_weight_sum==1:
pass
elif initial_weight_sum==0:
initial_weight.iloc[0, :] = 0
else:
initial_weight.iloc[0, :] = initial_weight.iloc[0, :]/initial_weight_sum
if rebalanced_weight is None:
rebalanced_weight = initial_weight
else:
assert rebalanced_weight.shape[0]==1, 'Input weight with shape (1,n)'
assert all(initial_weight.index == rebalanced_weight.index), 'Inconsistent weight data!'
# Determine tradable tickers from self.trading_status:
rebalanced_date = initial_weight.index[0]
trading_status = self.trading_status.loc[[rebalanced_date], :]
# Two weight vectors will be calcuate: one for rebalance, one for roll forward
rebalanced_weight = rebalanced_weight.where(trading_status, other=0)
roll_forward_weight = initial_weight.where(~trading_status, other=0)
roll_forward_total = roll_forward_weight.iloc[0, :].sum()
if roll_forward_total<1:
rebalanced_total = rebalanced_weight.iloc[0, :].sum()
adjustment_factor = (1-roll_forward_total)/rebalanced_total
rebalanced_weight = rebalanced_weight*adjustment_factor
rebalanced_weight = rebalanced_weight+roll_forward_weight
else:
rebalanced_weight = roll_forward_weight
assert abs(rebalanced_weight.iloc[0, :].sum()-1)<1e-4, 'Abnormal rebalanced weight!'
######################## Drifting ##################
# Prepare period price data:
period_index = self.__price.index
period_index = period_index[(period_index>=initial_weight.index[0]) & (period_index<=end)]
period_price = self.__price.loc[period_index, :].ffill()
# Total returns:
total_return = period_price/period_price.iloc[0,:]
# Drifting weights:
drift_weight = rebalanced_weight.reindex(period_index).ffill()
drift_weight = drift_weight * total_return
drift_weight = drift_weight.div(drift_weight.sum(axis=1), axis=0).fillna(0)
return drift_weight
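    def _example_drift_weight_usage(self):
        """
        Illustrative sketch only (not used by the backtest): drift the first rebalance of this
        portfolio until end_date, mirroring how the ex_weight property below drives _drift_weight.
        """
        first_date = self.weight.index[0]
        initial_weight = pd.DataFrame(0, index=[first_date], columns=self.price.columns)
        rebalanced_weight = self.weight.loc[[first_date], :]
        return self._drift_weight(initial_weight=initial_weight,
                                  rebalanced_weight=rebalanced_weight,
                                  end=self.end_date)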
@property
def ex_weight(self):
'''
Extend the weight to all dates before self.end_date.
'''
try:
return self._ex_weight
except AttributeError:
# Prepare the index after extention: (From first weight to end date)
extend_period = self.__price.index
extend_period = extend_period[(extend_period>=self.weight.index[0])&(extend_period<=self.end_date)]
extend_weight = self.weight.reindex(extend_period)
# Prepare the tuples for start and end date in each rebalancing period:
rebalance_dates = pd.Series(self.weight.index)
rebalance_start_end = zip(rebalance_dates,rebalance_dates.shift(-1, fill_value= pd.to_datetime(self.end_date)) )
# Initial holdings are all 0:
initial_weight = pd.DataFrame(0, index=[extend_period[0]], columns=self.__price.columns)
# Loop over each rebalancing period:
for start, end in rebalance_start_end:
rebalanced_weight = self.weight.loc[[start], :]
period_weight = self._drift_weight(initial_weight=initial_weight,rebalanced_weight=rebalanced_weight, end=end)
extend_weight.loc[start:end, :] = period_weight
initial_weight = extend_weight.loc[[end], :]
self._ex_weight = extend_weight
return self._ex_weight
@property
def port_daily_ret(self):
try:
return self._port_daily_ret
except AttributeError:
daily_ret = self.daily_ret.copy()
ex_weight = self.ex_weight
daily_ret = daily_ret.loc[daily_ret.index&ex_weight.index, daily_ret.columns&ex_weight.columns]
port_daily_ret_values = np.log((ex_weight.shift(1)*np.exp(daily_ret)).sum(axis=1))
port_daily_ret_values[0] = np.nan
            port_daily_ret = pd.Series(port_daily_ret_values, index=ex_weight.index)
            # cache and return, mirroring the other lazily computed properties above
            self._port_daily_ret = port_daily_ret
            return self._port_daily_ret
"""
utility functions for node classification; dynamic graphs
"""
import argparse
import sys
import pandas as pd
import numpy as np
from scipy.stats import entropy
import random
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import *
from utils import *
from tqdm import tqdm
rnd_seed = 2021
def base_rflr_classification(clf, identifier, X_train, X_test, y_train, y_test, binary):
"""
train the model on the train set and test it on the test set.
    to be consistent among different runs, the indices are passed.
important NOTE: it is implicitly inferred that the positive label is 1.
"""
# train the model
clf.fit(X_train, y_train)
# predict the training set labels
y_train_pred = clf.predict(X_train)
# predict the test set labels
y_test_pred = clf.predict(X_test)
    # evaluate the performance for the training and test sets
tr_perf_dict = perf_report(clf, X_train, y_train, y_train_pred, binary)
ts_perf_dict = perf_report(clf, X_test, y_test, y_test_pred, binary)
perf_dict = {
'ID': identifier,
# train
'train_prec': tr_perf_dict['prec'],
'train_rec': tr_perf_dict['rec'],
'train_f1': tr_perf_dict['f1'],
'train_acc': tr_perf_dict['acc'],
'train_auc_roc': tr_perf_dict['auc_roc'],
# test
'test_prec': ts_perf_dict['prec'],
'test_rec': ts_perf_dict['rec'],
'test_f1': ts_perf_dict['f1'],
'test_acc': ts_perf_dict['acc'],
'test_auc_roc': ts_perf_dict['auc_roc'],
}
return perf_dict
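# Synthetic-data sketch (not called anywhere) of driving base_rflr_classification directly;
# it assumes the perf_report helper imported from utils above accepts plain numpy arrays.
def _example_base_rflr_classification():
    rng = np.random.RandomState(rnd_seed)
    X = rng.rand(40, 5)
    y = (X[:, 0] > 0.5).astype(int)
    clf = RandomForestClassifier(n_estimators=10, random_state=rnd_seed)
    return base_rflr_classification(clf, 'toy_RF', X[:30], X[30:], y[:30], y[30:], binary=True)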
def rf_lr_classification(X_train, X_test, y_train, y_test, stats_file, network, clf_name, binary):
"""
apply classification to input X with label y with "Random Forest" & "Logistic Regression"
:param X_train: train set
:param X_test: test set
:param y_train: train set labels
:param y_test: test set labels
:return the classification results
"""
# define classifier
if clf_name == 'RF':
clf = RandomForestClassifier(n_estimators=50, max_features=10, max_depth=5, random_state=rnd_seed)
# rf_clf = RandomForestClassifier(n_estimators=500, random_state=rnd_seed)
elif clf_name == 'LR':
clf = LogisticRegression(penalty='l1', solver='liblinear', max_iter=1e5, random_state=rnd_seed)
# apply classification
clf_perf = base_rflr_classification(clf, f'{network}_{clf_name}', X_train, X_test, y_train, y_test, binary)
# append the results to file
stats_df = pd.read_csv(stats_file)
stats_df = stats_df.append(clf_perf, ignore_index=True)
stats_df.to_csv(stats_file, index=False)
return clf_perf
def end_to_end_rf_lr_clf(args):
############
# main task
############
network = args.network
n_iter = args.n_runs
clf_name = args.clf
binary = True
# make stats file
stats_filename = f"./logs/{network}_stats_{args.clf}.csv"
stats_file = open(stats_filename, 'w')
header_line = 'ID,train_prec,train_rec,train_f1,train_acc,train_auc_roc,test_prec,test_rec,test_f1,test_acc,' \
'test_auc_roc\n '
stats_file.write(header_line)
stats_file.close()
meta_col_names = ['node', 'label', 'train_mask', 'val_mask', 'test_mask', 'is_anchor']
# read node features
node_feats_filename = f'./data/{network}/{network}_node_feats.csv'
for i in tqdm(range(n_iter)):
node_feats_df = pd.read_csv(node_feats_filename)
# read masks
masks_filename = f'./data/{network}/masks/masks_{i}.csv'
masks_df = pd.read_csv(masks_filename)
node_feats_df = node_feats_df.merge(masks_df, how='inner', on='node')
node_feats_df = node_feats_df.loc[node_feats_df['is_anchor'] == 1]
train_node_feats = node_feats_df.loc[((node_feats_df['train_mask'] == 1) | (node_feats_df['val_mask'] == 1))]
test_node_feats = node_feats_df.loc[node_feats_df['test_mask'] == 1]
y_train = train_node_feats['label'].tolist()
y_test = test_node_feats['label'].tolist()
X_train = train_node_feats.drop(meta_col_names, axis=1)
X_test = test_node_feats.drop(meta_col_names, axis=1)
# scaling
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train.values)
X_test = scaler.transform(X_test.values)
# classification
clf_perf = rf_lr_classification(X_train, X_test, y_train, y_test, stats_filename, network, clf_name, binary)
# append average to the stats
    stats_df = pd.read_csv(stats_filename)
import os
from nose.tools import *
import unittest
import pandas as pd
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests',
'test_datasets', 'catalog'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class CatalogManagerTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_get_property_valid_df_name_1(self):
# cm.del_catalog()
df = read_csv_metadata(path_a)
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
def test_get_property_valid_df_name_2(self):
# cm.del_catalog()
self.assertEqual(cm.get_catalog_len(), 0)
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_property(C, 'key'), '_id')
self.assertEqual(cm.get_property(C, 'fk_ltable'), 'ltable_ID')
self.assertEqual(cm.get_property(C, 'fk_rtable'), 'rtable_ID')
self.assertEqual(cm.get_property(C, 'ltable').equals(A), True)
self.assertEqual(cm.get_property(C, 'rtable').equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_property_invalid_df_1(self):
cm.get_property(10, 'key')
@raises(AssertionError)
def test_get_property_invalid_path_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
cm.get_property(A, None)
# cm.del_catalog()
@raises(KeyError)
def test_get_property_df_notin_catalog(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.get_property(A, 'key')
# cm.del_catalog()
def test_set_property_valid_df_name_value(self):
# cm.del_catalog()
df = pd.read_csv(path_a)
cm.set_property(df, 'key', 'ID')
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_invalid_df(self):
# cm.del_catalog()
cm.set_property(None, 'key', 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_valid_df_invalid_prop(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.set_property(A, None, 'ID')
# cm.del_catalog()
def test_init_properties_valid(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.init_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), True)
# cm.del_catalog()
@raises(AssertionError)
def test_init_properties_invalid_df(self):
cm.init_properties(None)
def test_get_all_properties_valid_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
m = cm.get_all_properties(A)
self.assertEqual(len(m), 1)
self.assertEqual(m['key'], 'ID')
# cm.del_catalog()
def test_get_all_properties_valid_2(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
m = cm.get_all_properties(C)
self.assertEqual(len(m), 5)
self.assertEqual(m['key'], '_id')
self.assertEqual(m['fk_ltable'], 'ltable_ID')
self.assertEqual(m['fk_rtable'], 'rtable_ID')
self.assertEqual(m['ltable'].equals(A), True)
self.assertEqual(m['rtable'].equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_all_properties_invalid_df_1(self):
# cm.del_catalog()
C = cm.get_all_properties(None)
@raises(KeyError)
def test_get_all_properties_invalid_df_2(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
C = cm.get_all_properties(A)
def test_del_property_valid_df_name(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key')
self.assertEqual(len(cm.get_all_properties(A)), 0)
@raises(AssertionError)
def test_del_property_invalid_df(self):
cm.del_property(None, 'key')
@raises(AssertionError)
def test_del_property_invalid_property(self):
A = read_csv_metadata(path_a)
cm.del_property(A, None)
@raises(KeyError)
def test_del_property_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_property(A, 'key')
@raises(KeyError)
def test_del_property_prop_notin_catalog(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key1')
def test_del_all_properties_valid_1(self):
A = read_csv_metadata(path_a)
cm.del_all_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), False)
def test_del_all_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.del_all_properties(C)
self.assertEqual(cm.is_dfinfo_present(C), False)
@raises(AssertionError)
def test_del_all_properties_invalid_df(self):
cm.del_all_properties(None)
@raises(KeyError)
def test_del_all_properties_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_all_properties(A)
def test_get_catalog_valid(self):
A = read_csv_metadata(path_a)
cg = cm.get_catalog()
self.assertEqual(len(cg), 1)
def test_del_catalog_valid(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
cg = cm.get_catalog()
self.assertEqual(len(cg), 0)
def test_is_catalog_empty(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
self.assertEqual(cm.is_catalog_empty(), True)
def test_is_dfinfo_present_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, True)
def test_is_dfinfo_present_valid_2(self):
A = pd.read_csv(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_dfinfo_present_invalid(self):
cm.is_dfinfo_present(None)
def test_is_property_present_for_df_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key')
self.assertEqual(status, True)
def test_is_property_present_for_df_valid_2(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key1')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_property_present_for_df_invalid_df(self):
cm.is_property_present_for_df(None, 'key')
@raises(KeyError)
def test_is_property_present_for_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.is_property_present_for_df(A, 'key')
def test_catalog_len(self):
A = read_csv_metadata(path_a)
self.assertEqual(cm.get_catalog_len(), 1)
def test_set_properties_valid_1(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.init_properties(B)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
def test_set_properties_valid_2(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
@raises(AssertionError)
def test_set_properties_invalid_df_1(self):
cm.set_properties(None, {})
@raises(AssertionError)
def test_set_properties_invalid_dict_1(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, None)
def test_set_properties_df_notin_catalog_replace_false(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, {}, replace=False)
self.assertEqual(cm.get_key(A), 'ID')
# def test_has_property_valid_1(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key'), True)
#
# def test_has_property_valid_2(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key1'), False)
#
# @raises(AssertionError)
# def test_has_property_invalid_df(self):
# cm.has_property(None, 'key')
#
# @raises(AssertionError)
# def test_has_property_invalid_prop_name(self):
# A = read_csv_metadata(path_a)
# cm.has_property(A, None)
#
# @raises(KeyError)
# def test_has_property_df_notin_catalog(self):
# A = pd.read_csv(path_a)
# cm.has_property(A, 'key')
def test_copy_properties_valid_1(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
self.assertEqual(cm.is_dfinfo_present(A1), True)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
def test_copy_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
C1 = pd.read_csv(path_c)
cm.copy_properties(C, C1)
self.assertEqual(cm.is_dfinfo_present(C1), True)
p = cm.get_all_properties(C1)
p1 = cm.get_all_properties(C1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(C1), cm.get_key(C))
self.assertEqual(cm.get_ltable(C1).equals(A), True)
self.assertEqual(cm.get_rtable(C1).equals(B), True)
self.assertEqual(cm.get_fk_ltable(C1), cm.get_fk_ltable(C))
self.assertEqual(cm.get_fk_rtable(C1), cm.get_fk_rtable(C))
@raises(AssertionError)
def test_copy_properties_invalid_tar_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(A, None)
@raises(AssertionError)
def test_copy_properties_invalid_src_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(None, A)
def test_copy_properties_update_false_1(self):
A = read_csv_metadata(path_a)
A1 = read_csv_metadata(path_a)
status=cm.copy_properties(A, A1, replace=False)
self.assertEqual(status, False)
def test_copy_properties_update_false_2(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1, replace=False)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
@raises(KeyError)
def test_copy_properties_src_df_notin_catalog(self):
A = pd.read_csv(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
def test_get_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_get_key_invalid_df(self):
cm.get_key(None)
@raises(KeyError)
def test_get_key_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.get_key(A)
def test_set_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_set_key_invalid_df(self):
cm.set_key(None, 'ID')
@raises(KeyError)
def test_set_key_notin_df(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID1')
def test_set_key_with_dupids(self):
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_set_key_with_mvals(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_get_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_ltable(C), cm.get_property(C, 'fk_ltable'))
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_get_fk_ltable_invalid_df(self):
cm.get_fk_ltable(None)
def test_get_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_rtable(C), cm.get_property(C, 'fk_rtable'))
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_get_fk_rtable_invalid_df(self):
cm.get_fk_rtable(None)
def test_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_set_fk_ltable_invalid_df(self):
cm.set_fk_ltable(None, 'ltable_ID')
@raises(KeyError)
def test_set_fk_ltable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID1')
def test_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID')
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_set_fk_rtable_invalid_df(self):
cm.set_fk_rtable(None, 'rtable_ID')
@raises(KeyError)
def test_set_fk_rtable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID1')
def test_validate_and_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
def test_validate_and_set_fk_ltable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_ltable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_rtable(C), 'ltable_ID')
def test_validate_and_set_fk_rtable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
# def test_get_reqd_metadata_from_catalog_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, 'key')
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, ['key'])
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_3(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable', 'fk_rtable', 'ltable', 'rtable'])
# self.assertEqual(d['key'], cm.get_key(C))
# self.assertEqual(d['fk_ltable'], cm.get_fk_ltable(C))
# self.assertEqual(d['fk_rtable'], cm.get_fk_rtable(C))
# self.assertEqual(cm.get_ltable(C).equals(A), True)
# self.assertEqual(cm.get_rtable(C).equals(B), True)
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_1(self):
# cm.get_reqd_metadata_from_catalog(None, ['key'])
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_2(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable1', 'fk_rtable', 'ltable', 'rtable'])
#
#
# def test_update_reqd_metadata_with_kwargs_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key'])
# self.assertEqual(metadata['key'], d['key'])
#
# def test_update_reqd_metadata_with_kwargs_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, 'key')
# self.assertEqual(metadata['key'], d['key'])
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(None, d, 'key')
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(d, None, 'key')
#
# @raises(AssertionError)
# def test_update_reqd_metadata_with_kwargs_invalid_elts(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key1'])
# def test_get_diff_with_reqd_metadata_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, 'key1')
# self.assertEqual(len(d1), 1)
#
# def test_get_diff_with_reqd_metadata_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, ['key1'])
# self.assertEqual(len(d1), 1)
#
# @raises(AssertionError)
# def test_get_diff_with_reqd_metadata_invalid_dict(self):
# d1 = cm._get_diff_with_required_metadata(None, ['key1'])
# def test_is_all_reqd_metadata_present_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, 'key'),True)
#
# def test_is_all_reqd_metadata_present_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key']),True)
#
# def test_is_all_reqd_metadata_present_valid_3(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key1']), False)
#
# @raises(AssertionError)
# def test_is_all_reqd_metadata_present_invalid_dict(self):
# cm.is_all_reqd_metadata_present(None, 'key')
def test_show_properties_for_df_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_2(self):
A = pd.read_csv(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties(C)
def test_show_properties_for_objid_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties_for_id(id(A))
@raises(KeyError)
def test_show_properties_for_objid_err_1(self):
A = pd.read_csv(path_a)
cm.show_properties_for_id(id(A))
def test_show_properties_for_objid_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties_for_id(id(C))
def test_validate_metadata_for_table_valid_1(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', None, False)
self.assertEqual(status, True)
def test_validate_metadata_for_table_valid_2(self):
import logging
logger = logging.getLogger(__name__)
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', logger, True)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_table_invalid_df(self):
status = cm._validate_metadata_for_table(None, 'ID', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notin_catalog(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID1', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notstring(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, None, 'table', None, False)
@raises(AssertionError)
    def test_validate_metadata_for_table_key_notvalid(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'zipcode', 'table', None, False)
def test_validate_metadata_for_candset_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_df(self):
status = cm._validate_metadata_for_candset(None, '_id', 'ltable_ID', 'rtable_ID', None, None,
'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_id_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, 'id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_ltable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltableID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_rtable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtableID', A, B, 'ID', 'ID', None, False)
@raises(AssertionError)
    def test_validate_metadata_for_candset_invalid_ltable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', None, B, 'ID', 'ID', None, False)
@raises(AssertionError)
    def test_validate_metadata_for_candset_invalid_rtable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', B, None, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_lkey_notin_ltable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID1', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_rkey_notin_rtable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID1', None, False)
def test_get_keys_for_ltable_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, B, None, False)
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_ltable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(None, B, None, False)
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_rtable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, None, None, False)
def test_get_metadata_for_candset_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(C, None, False)
self.assertEqual(key, '_id')
self.assertEqual(fk_ltable, 'ltable_ID')
self.assertEqual(fk_rtable, 'rtable_ID')
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
self.assertEqual(ltable.equals(A), True)
self.assertEqual(rtable.equals(B), True)
@raises(AssertionError)
def test_get_metadata_for_candset_invalid_df(self):
cm.get_metadata_for_candset(None, None, False)
#--- catalog ---
def test_catalog_singleton_isinstance(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__instancecheck__(object)
@raises(TypeError)
def test_catalog_singleton_call(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__call__()
# -- catalog helper --
def test_check_attrs_present_valid_1(self):
        A = pd.read_csv(path_a)
import collections
import csv
import tensorflow as tf
from sklearn.metrics import *
import pandas as pd
import numpy as np
from tensorflow.keras.callbacks import Callback
import logging
# Following is a dependency on the ssig package:
#! git clone https://github.com/ipavlopoulos/ssig.git
from ssig import art
def ca_perspective(n=5):
"""
Evaluate PERSPECTIVE with parent-target concatenated.
Scores provided to us.
    :param n: number of random validation splits to evaluate
    :return: list of ROC-AUC scores, one per split
"""
c = pd.read_csv("data/c_parenttext.csv")
c.set_index(["id"], inplace=True)
data = [pd.read_csv(f"data/standard.622/random_ten/{i}/ic.val.csv") for i in range(n)]
scores = []
for sample in data:
sample["ca_score"] = sample["id"].apply(lambda x: c.loc[x].TOXICITY)
scores.append(roc_auc_score(sample.label, sample.ca_score))
return scores
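# Minimal illustration of the metric used above, on toy labels and scores rather than the
# real validation splits (the 0.75 is just what this toy input yields). Guarded so it does
# not run on import.
if __name__ == '__main__':
    print(roc_auc_score([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]))  # 0.75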
def persp_vs_capersp(n=5):
c = pd.read_csv("data/c_parenttext.csv")
c.set_index(["id"], inplace=True)
data = [pd.read_csv(f"data/standard.622/random_ten/{i}/ic.val.csv") for i in range(n)]
    val = pd.concat(data)
#-*- coding: utf-8 -*-
import pandas as pd
import numpy as np
ACTION_201602_FILE = "data_ori/JData_Action_201602.csv"
ACTION_201603_FILE = "data_ori/JData_Action_201603.csv"
ACTION_201603_EXTRA_FILE = "data_ori/JData_Action_201603_extra.csv"
ACTION_201604_FILE = "data_ori/JData_Action_201604.csv"
COMMENT_FILE = "data/JData_Comment.csv"
PRODUCT_FILE = "data/JData_Product.csv"
USER_FILE = "data/JData_User.csv"
NEW_USER_FILE = "data/JData_User_New.csv"
USER_TABLE_FILE = "data/user_table.csv"
def get_from_action_data(fname, chunk_size=100000):
reader = pd.read_csv(fname, header=0, iterator=True)
chunks = []
loop = True
while loop:
try:
chunk = reader.get_chunk(chunk_size)[
["user_id", "sku_id", "type", "time"]]
chunks.append(chunk)
except StopIteration:
loop = False
print("Iteration is stopped")
df_ac = pd.concat(chunks, ignore_index=True)
df_ac = df_ac[df_ac['type'] == 4]
return df_ac[["user_id", "sku_id", "time"]]
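# Self-contained illustration of the chunked-reading pattern used above, on an in-memory
# toy CSV (assumed toy values; not part of the original pipeline). Guarded so it only runs
# when this file is executed directly.
if __name__ == '__main__':
    import io
    toy_csv = io.StringIO("user_id,sku_id,type,time\n1,10,4,2016-02-01\n2,11,1,2016-02-02\n")
    print(get_from_action_data(toy_csv, chunk_size=1))  # only the type == 4 row survives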
def merge_weekday_action_data():
df_ac = []
df_ac.append(get_from_action_data(fname=ACTION_201602_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201603_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201603_EXTRA_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201604_FILE))
df_ac = pd.concat(df_ac, ignore_index=True)
# data type
print(df_ac)
print(df_ac.dtypes)
# Monday = 0, Sunday = 6
df_ac['time'] = pd.to_datetime(
df_ac['time']).apply(lambda x: x.weekday() + 1)
df_user = df_ac.groupby('time')['user_id'].nunique()
# df_ac = pd.DataFrame({'weekday': df_ac.index, 'user_num': df_ac.values})
df_user = df_user.to_frame().reset_index()
df_user.columns = ['weekday', 'user_num']
print(df_user)
df_item = df_ac.groupby('time')['sku_id'].nunique()
df_item = df_item.to_frame().reset_index()
df_item.columns = ['weekday', 'item_num']
print(df_item)
df_ui = df_ac.groupby('time', as_index=False).size()
df_ui = df_ui.to_frame().reset_index()
df_ui.columns = ['weekday', 'user_item_num']
print(df_ui)
def month_action_data_statistic():
# Feb.
df_ac = get_from_action_data(fname=ACTION_201602_FILE)
df_ac['time'] = pd.to_datetime(df_ac['time']).apply(lambda x: x.day)
# March
df_ac = []
df_ac.append(get_from_action_data(fname=ACTION_201603_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201603_EXTRA_FILE))
    df_ac = pd.concat(df_ac, ignore_index=True)
"""
This module implements several methods for calculating and outputting solutions of the unionfind_cluster_editing() algorithm.
It contains two methods for the (best) generated raw solutions,
and, more importantly, methods to merge solutions into one better solution.
"""
from union_find import *
from math import log
import sys
import numpy as np
from numba import njit, jit
from numpy import random as rand
from model_sqrt import *
from numba.typed import Dict
import pandas as pd
def best_solution(solution_costs, parents, filename, missing_weight, n, x):
"""
This function outputs the best generated solution to a file named "result.txt".
"""
costs = solution_costs.min()
best = parents[solution_costs.argmin()]
file = open("result.txt", mode="a")
with file:
file.write("filename: %s \nmissing_weight: %f \nn: %d \nx (solutions generated): %d\nbest solution found:\n" % (filename, missing_weight, n, x))
file.write(f"costs: {costs}\n")
for i in range(0,n):
file.write(f"{best[i]} ")
def print_solution_costs(solution_costs, filename):
"""
    This function outputs all solution costs, sorted ascending, to a file named "..._solution_costs_v5.txt".
"""
sorted_costs = np.sort(solution_costs)
print_to = filename[:-4] + "_solution_costs_v5.txt"
with open(print_to, mode="a") as file:
for cost in sorted_costs:
file.write(str(cost))
file.write("\n")
def all_solutions(solution_costs, parents, filename, missing_weight, n):
"""
    This function outputs all solutions, sorted by their costs, to a file named "..._all_solutions_v5.txt".
"""
cost_sorted_i = np.argsort(solution_costs)
print_to = filename[:-4] + "_all_solutions_v5.txt"
count = 1
with open(print_to, mode="a") as file:
file.write("filename: %s \nmissing_weight: %f \nn: %d\n" % (filename, missing_weight, n))
for i in cost_sorted_i:
file.write("%d. best solution with cost %f\n" % (count, solution_costs[i]))
count += 1
for j in range(0,n):
file.write(f"{parents[i, j]} ")
file.write("\n")
@njit
def weighted_decision(x, y, cluster_masks, f_vertex_costs, f_sizes, f_parents):
"""
This function is a helper function for merging functions. It generates a weight for cluster center x and another node y by counting the costs over all solutions for two scenarios:
1: y is in the same cluster as x
0: y is in another cluster
The return value is between -1 and 1, -1 for certainly not connected, 1 for certainly connected. A value of 0 would indicate that connected or not connected would (in mean) yield the same costs (as in: the error is not big enough to make a difference).
"""
sol_len = len(f_parents)
sum_for_0 = 0
sum_for_1 = 0
count_0 = 0
count_1 = 0
for i in range(0,sol_len):
x_cost = f_vertex_costs[i, x]
y_cost = f_vertex_costs[i, y]
if cluster_masks[i, y] == 0:
sum_for_0 += x_cost + y_cost
count_0 += 1
else:
sum_for_1 += x_cost + y_cost
count_1 += 1
if count_0 > 0:
cost_0 = sum_for_0/count_0
if count_1 > 0:
cost_1 = sum_for_1/count_1
if cost_0 == 0 and cost_1 == 0:
print("Warning: Both together and single get cost 0 - something went wrong!")
else:
return (cost_0 - cost_1) / (cost_0 + cost_1)
else:
            # If no entry is 1, the node almost certainly does not belong to the cluster
return -1.0
else:
        # If no entry is 0, the node almost certainly belongs to the cluster
return 1.0
    # A positive return value means a decision for 1 (together), a negative one for 0 (separate).
    # The closer the return value is to 0, the less certain that decision is.
    # If none of the cases above applied (frequency decides / a ratio is available):
return 0.0
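# Tiny, guarded example of how the returned weight is read (toy numbers, purely
# illustrative): nodes 0 and 1 are cheap when clustered together in solution 0 and
# expensive when separated in solution 1, so the weight comes out at +1.0.
if __name__ == '__main__':
    toy_masks = np.array([[1, 1], [1, 0]], dtype=np.int8)
    toy_costs = np.array([[0.0, 0.0], [2.0, 2.0]], dtype=np.float64)
    toy_sizes = np.ones((2, 2), dtype=np.int64)
    toy_parents = np.array([[0, 0], [0, 1]], dtype=np.int64)
    print(weighted_decision(0, 1, toy_masks, toy_costs, toy_sizes, toy_parents))  # 1.0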
@njit
def merged_solution(solution_costs, vertex_costs, parents, sizes, missing_weight, n):
"""
First merge algorithm. It calculates cluster masks for each cluster center:
True, if the node is in the same component with cluster center,
False otherwise.
    For these cluster masks, a weighted decision value is calculated for each cluster center x and each other node y. If this weight is better than the previous one, y gets assigned to the new cluster center x. X then receives the maximum of these weights over all y, unless that is lower than its previous weight. Tree-like structures can emerge in such cases. Those trees are not handled yet; however, they indicate a conflict in the solution, as a node that is both child and parent belongs to two distinct clusters.
"""
sol_len = len(solution_costs)
    # Create the new solution as an array:
merged_sol = np.arange(n) #dtype = np.int64 not supported by numba
    # Create arrays for comparing the clusters:
cluster_masks = np.zeros((sol_len,n), dtype=np.int8) #np.bool not supported
for j in range(n):
        # Fill the cluster masks
for i in range(sol_len):
            # Each cluster mask contains "True" wherever parents has the same
            # value as at position j, and "False" otherwise
for k in range(n):
cluster_masks[i, k] = np.int8(parents[i, k] == parents[i, j])
        # Determine membership (or non-membership) in the cluster.
        # All previous nodes have already been visited as centers and have therefore already been connected to this node (or not) - the costs are symmetric!
for k in range(j+1,n):
            # The cluster center itself is skipped (i.e. it may still point to another cluster!)
if k == j:
continue
wd = weighted_decision(j, k, cluster_masks, vertex_costs, sizes, parents)
            # If the weight is large enough:
if wd > 0.05:
rem_union(j, k, merged_sol)
return merged_sol
@njit
def weighted_decision_scan(x, y, connectivity, f_vertex_costs, f_sizes, f_parents):
"""
This function is a helper function for merging functions. It generates a weight for cluster center x and another node y by counting the costs over all solutions for two scenarios:
1: y is in the same cluster as x
0: y is in another cluster
The return value is between -1 and 1, -1 for certainly not connected, 1 for certainly connected. A value of 0 would indicate that connected or not connected would (in mean) yield the same costs (as in: the error is not big enough to make a difference).
"""
sol_len = len(f_parents)
sum_for_0 = 0
sum_for_1 = 0
count_0 = 0
count_1 = 0
for i in range(0,sol_len):
x_cost = f_vertex_costs[i, x]
y_cost = f_vertex_costs[i, y]
if connectivity[i]:
sum_for_1 += x_cost + y_cost
count_1 += 1
else:
sum_for_0 += x_cost + y_cost
count_0 += 1
if count_0 > 0:
cost_0 = sum_for_0/count_0
if count_1 > 0:
cost_1 = sum_for_1/count_1
if cost_0 == 0 and cost_1 == 0:
print("Warning: Both together and single get cost 0 - something went wrong!")
else:
return (cost_0 - cost_1) / (cost_0 + cost_1)
else:
            # If no entry is 1, the node almost certainly does not belong to the cluster
return -1.0
else:
        # If no entry is 0, the node almost certainly belongs to the cluster
return 1.0
    # A positive return value means a decision for 1 (together), a negative one for 0 (separate).
    # The closer the return value is to 0, the less certain that decision is.
    # If none of the cases above applied (frequency decides / a ratio is available):
return 0.0
def merged_solution_scan(solution_costs, vertex_costs, parents, sizes, missing_weight, n, filename):
"""
First merge algorithm. It calculates cluster masks for each cluster center:
True, if the node is in the same component with cluster center,
False otherwise.
    For these cluster masks, a weighted decision value is calculated for each cluster center x and each other node y. If this weight is better than the previous one, y gets assigned to the new cluster center x. X then receives the maximum of these weights over all y, unless that is lower than its previous weight. Tree-like structures can emerge in such cases. Those trees are not handled yet; however, they indicate a conflict in the solution, as a node that is both child and parent belongs to two distinct clusters.
"""
sol_len = len(solution_costs)
    # Create the new solution as an array:
merged_sol = np.arange(n) #dtype = np.int64 not supported by numba
merged_sizes = np.ones(n, dtype=np.int64)
    # Create arrays for comparing the clusters:
connectivity = np.zeros(sol_len, dtype=np.int8) #np.bool not supported
graph_file = open(filename, mode="r")
for line in graph_file:
        # Skip comment lines
if line[0] == "#":
continue
splitted = line.split()
nodes = np.array(splitted[:-1], dtype=np.int64)
weight = np.float64(splitted[2])
i = nodes[0]
j = nodes[1]
if weight < 0:
continue
        # Fill the connectivity array
for x in range(sol_len):
connectivity[x] = np.int8(parents[x, i] == parents[x, j])
        # Determine membership (or non-membership) in the cluster.
        # All previous nodes have already been visited as centers and have therefore already been connected to this node (or not) - the costs are symmetric!
wd = weighted_decision_scan(i, j, connectivity, vertex_costs, sizes, parents)
        # If the weight is large enough:
if wd > 0.05:
rem_union(i, j, merged_sol)
return merged_sol
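# Illustrative sketch of the input format the scan above expects: a whitespace-separated
# edge list with one "i j weight" triple per line, '#' for comment lines, and negative
# weights marking non-edges (which are skipped). Toy content, guarded against import.
if __name__ == '__main__':
    with open("toy_graph.txt", mode="w") as toy_file:
        toy_file.write("# toy instance with three nodes\n")
        toy_file.write("0 1 1.0\n")
        toy_file.write("1 2 -1.0\n")
        toy_file.write("0 2 2.5\n")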
@njit
def repair_merged(merged, merged_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
sol_len = len(solution_costs)
# Arrays anlegen für Vergleichbarkeit der Cluster:
cluster_masks = np.zeros((sol_len,n), dtype=np.int8) #np.bool not supported
for i in range(n):
# Detektiere und verbinde "Mini-Cluster" (Wurzel des Clusters soll verbunden werden);
# Reparatur wird versucht, wenn die Größe des Clusters weniger als halb so groß ist wie der Knotengrad angibt, dh. die lokale Fehlerrate wäre bei über 50% in der Probleminstanz.
if merged[i] == i and merged_sizes[i] < 0.5*node_dgree[i]:
max_wd = -1
best_fit = i
# Fülle Cluster-Masken
for x in range(0,sol_len):
for j in range(n):
# Jede Cluster-Maske enthält "True" überall, wo parents
# denselben Wert hat wie an Stelle j, sonst "False"
cluster_masks[x, j] = np.int8(parents[x, i] == parents[x, j])
for j in range(n):
# Überspringe bereits verbundene Knoten und sich selbst
if merged[i] == merged[j]:
continue
# Berechne Gewicht:
wd = weighted_decision(i, j, cluster_masks, vertex_costs, sizes, parents)
# Aktualisiere ggf. best-passenden Knoten
if wd > max_wd:
max_wd = wd
best_fit = j
# ggf. Modifikation, nur union falls auch max_wd passt.
#if max_wd > 0.1:
union(i, best_fit, merged, merged_sizes)
result = np.zeros((2,n), dtype=np.int64)
result[0] = merged
result[1] = merged_sizes
return result
def get_cluster_centers_big(merged, merged_sizes, node_dgree, split):
big_ccs = {}
for i in range(len(merged)):
if merged_sizes[merged[i]] >= node_dgree[merged[i]] * split:
big_ccs[merged[i]] = merged_sizes[merged[i]]
return big_ccs
def get_cluster_centers_small(merged, merged_sizes, node_dgree, split):
small_ccs = {}
for i in range(len(merged)):
if merged_sizes[merged[i]] < node_dgree[merged[i]] * split:
small_ccs[merged[i]] = merged_sizes[merged[i]]
return small_ccs
def get_second_center(merged, big_ccs):
second_cc = {}
for center in big_ccs.keys():
# Durchlaufe solange andere Knoten bis einer aus dem selben Cluster gefunden wurde
for i in range(len(merged)):
# nicht der selbe Knoten ist gesucht
if i == center:
continue
# sondern der erste, der einen anderen Index hat aber den selben Eintrag:
if merged[i] == merged[center]:
second_cc[center] = i
break
return second_cc
@njit
def weighted_decision_2(s_center, b_center, sb_center, connectivity, vertex_costs, sizes, parents):
costs_0 = 0.0
costs_1 = 0.0
count_0 = 0
count_1 = 0
for x in range(0, len(connectivity)):
if connectivity[x] == -1:
costs_1 += 0.5 * vertex_costs[x, s_center] + vertex_costs[x, b_center] + vertex_costs[x, b_center]
elif connectivity[x] == -2:
costs_1 += 0.5 * vertex_costs[x, s_center] + vertex_costs[x, sb_center] + vertex_costs[x, sb_center]
elif connectivity[x] == 1:
costs_1 += vertex_costs[x, s_center] + vertex_costs[x, b_center] + vertex_costs[x, sb_center]
count_1 += 1
else:
costs_0 += vertex_costs[x, s_center] + vertex_costs[x, b_center] + vertex_costs[x, sb_center]
count_0 += 1
if count_0 > 0:
cost_0 = costs_0/count_0
if count_1 > 0:
cost_1 = costs_1/count_1
if cost_0 == 0 and cost_1 == 0:
print("Warning: Both together and single get cost 0 - something went wrong!")
else:
return (cost_0 - cost_1) / (cost_0 + cost_1)
else:
            # If no entry is 1, the node almost certainly does not belong to the cluster
return -1.0
else:
        # If no entry is 0, the node almost certainly belongs to the cluster
return 1.0
def repair_merged_v2(merged, merged_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
sol_len = len(solution_costs)
# Arrays anlegen für Vergleichbarkeit der Cluster:
connectivity = np.zeros(sol_len, dtype=np.int8) #np.bool not supported
big_ccs = get_cluster_centers_big(merged, merged_sizes, node_dgree, 0.3)
small_ccs = get_cluster_centers_small(merged, merged_sizes, node_dgree, 0.3)
second_big_cc = get_second_center(merged, big_ccs)
for s_center in small_ccs.keys():
# Detektiere und verbinde "Mini-Cluster" (Wurzel des Clusters soll verbunden werden);
# Reparatur wird versucht, wenn die Größe des Clusters weniger als halb so groß ist wie der Knotengrad angibt, dh. die lokale Fehlerrate wäre bei über 50% in der Probleminstanz.
max_wd = -1
best_fit = s_center
# Fülle connectivity-Array (0: keine Verbindung zu Cluster; 1: eine Verbindung, 2: zwei Verbindungen)
for b_center in big_ccs.keys():
# Falls Cluster zusammen deutlich zu groß wären, überspringe diese Kombination direkt
if merged_sizes[s_center] + merged_sizes[b_center] > 1.5 * node_dgree[b_center]:
continue
for x in range(0,sol_len):
if parents[x, b_center] != parents[x, second_big_cc[b_center]]:
connectivity[x] = -1
continue
if parents[x, s_center] == parents[x, b_center]:
connectivity[x] = 1
else:
connectivity[x] = 0
# Berechne Gewicht:
wd = weighted_decision_2(s_center, b_center, second_big_cc[b_center], connectivity, vertex_costs, sizes, parents)
# Aktualisiere ggf. best-passenden Knoten
if wd > max_wd:
max_wd = wd
best_fit = b_center
# ggf. Modifikation, nur union falls auch max_wd passt.
if max_wd > 0.05:
union(s_center, best_fit, merged, merged_sizes)
result = np.zeros((2,n), dtype=np.int64)
result[0] = merged
result[1] = merged_sizes
return result
def repair_merged_v3(merged, merged_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
sol_len = len(solution_costs)
ccs = calculate_mean_nodedgr(merged, merged_sizes, node_dgree)
second_big_cc = get_second_center(merged, ccs)
connectivity = np.zeros(sol_len, dtype=np.int8)
for s_center in ccs.keys():
# s_center soll klein genug sein
if merged_sizes[s_center] > ccs[s_center] * 0.35:
continue
# Detektiere und verbinde "Mini-Cluster" (Wurzel des Clusters soll verbunden werden);
# Reparatur wird versucht, wenn die Größe des Clusters weniger als halb so groß ist wie der Knotengrad angibt, dh. die lokale Fehlerrate wäre bei über 50% in der Probleminstanz.
best_fit = s_center
max_wd = -0.05
for b_center in ccs.keys():
# b_center soll groß genug sein
if merged_sizes[b_center] <= ccs[b_center] * 0.35:
continue
# Falls Cluster zusammen deutlich zu groß wären, überspringe diese Kombination direkt
if merged_sizes[s_center] + merged_sizes[b_center] > 1.5 * ccs[b_center]:
continue
for x in range(0,sol_len):
if parents[x, b_center] != parents[x, second_big_cc[b_center]]:
connectivity[x] = -1
continue
if parents[x, s_center] == parents[x, b_center]:
connectivity[x] = 1
else:
connectivity[x] = 0
# Berechne Gewicht:
wd = weighted_decision_2(s_center, b_center, second_big_cc[b_center], connectivity, vertex_costs, sizes, parents)
# Aktualisiere ggf. best-passenden Knoten
if wd > max_wd:
max_wd = wd
best_fit = b_center
# Verbinde das Cluster mit dem Cluster, das lokal betrachtet die geringsten Knotenkosten einbrachte.
union(s_center, best_fit, merged, merged_sizes)
result = np.zeros((2,n), dtype=np.int64)
result[0] = merged
result[1] = merged_sizes
return result
@njit
def repair_merged_v3_nd(merged, merged_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree):
sol_len = len(solution_costs)
ccs_mndgr = calculate_mean_nodedgr_nd(merged, merged_sizes, node_dgree)
ccs = ccs_mndgr[0]
mean_ndgree = ccs_mndgr[1]
second_big_cc = get_second_center_nd(merged, ccs)
connectivity = np.zeros(sol_len, dtype=np.int8)
for s_center_i in range(len(ccs)):
# s_center soll klein genug sein
s_center = ccs[s_center_i]
if merged_sizes[s_center] > mean_ndgree[s_center_i] * 0.35:
continue
# Detektiere und verbinde "Mini-Cluster" (Wurzel des Clusters soll verbunden werden);
# Reparatur wird versucht, wenn die Größe des Clusters weniger als halb so groß ist wie der Knotengrad angibt, dh. die lokale Fehlerrate wäre bei über 50% in der Probleminstanz.
best_fit = s_center
max_wd = 0
for b_center_i in range(len(ccs)):
# b_center soll groß genug sein
b_center = ccs[b_center_i]
if merged_sizes[b_center] <= mean_ndgree[b_center_i] * 0.35:
continue
# Falls Cluster zusammen deutlich zu groß wären, überspringt diese Kombination direkt
if merged_sizes[s_center] + merged_sizes[b_center] > 1.5 * mean_ndgree[b_center_i]:
continue
for x in range(0,sol_len):
# Unterscheide vier Fälle: -1/-2: s_center nur mit einem verbunden; 1: mit beiden; 0: mit keinem
if parents[x, b_center] != parents[x, second_big_cc[b_center_i]]:
if parents[x, s_center] == parents[x, b_center]:
connectivity[x] = -1
elif parents[x, s_center] == parents[x, second_big_cc[b_center_i]]:
connectivity[x] = -2
continue
if parents[x, s_center] == parents[x, b_center]:
connectivity[x] = 1
else:
connectivity[x] = 0
# Berechne Gewicht:
wd = weighted_decision_2(s_center, b_center, second_big_cc[b_center_i], connectivity, vertex_costs, sizes, parents)
# Aktualisiere ggf. best-passenden Knoten
if wd > max_wd:
max_wd = wd
best_fit = b_center
# Verbinde das Cluster mit dem Cluster, das lokal betrachtet die geringsten Knotenkosten einbrachte.
union(s_center, best_fit, merged, merged_sizes)
result = np.zeros((2,n), dtype=np.int64)
result[0] = merged
result[1] = merged_sizes
return result
@njit
def mean_weight_connected(s_center, connectivity, vertex_costs, sizes, parents):
sol_len = len(connectivity)
mwc = 0.0
count = 0
for i in range(sol_len):
if connectivity[i]:
mwc += vertex_costs[i, s_center]
count += 1
if count == 0:
return -1.0
return mwc/count
@njit
def mean_weight_connected2(s_center, b_center, connectivity, vertex_costs, sizes, parents):
sol_len = len(connectivity)
mwc = 0.0
mwd = 0.0
count = 0
countd = 0
for i in range(sol_len):
if connectivity[i]:
mwc += vertex_costs[i, s_center] + vertex_costs[i, b_center]
count += 1
else:
mwd += vertex_costs[i, s_center] + vertex_costs[i, b_center]
countd += 1
if count == 0:
return -1.0
elif countd == 0:
return 1
cost_1 = mwc/count
cost_0 = mwd/countd
return (cost_0 - cost_1) / (cost_0 + cost_1)
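# Quick numeric illustration of the relative weight above (assumed toy costs, guarded):
# connected solutions average cost 1.0, disconnected ones 3.0, so the weight is
# (3.0 - 1.0) / (3.0 + 1.0) = 0.5, i.e. a fairly confident vote for merging.
if __name__ == '__main__':
    toy_conn = np.array([1, 0], dtype=np.int8)
    toy_vcosts = np.array([[0.5, 0.5], [1.5, 1.5]], dtype=np.float64)
    toy_szs = np.ones((2, 2), dtype=np.int64)
    toy_pars = np.zeros((2, 2), dtype=np.int64)
    print(mean_weight_connected2(0, 1, toy_conn, toy_vcosts, toy_szs, toy_pars))  # 0.5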
@njit
def repair_merged_v4_nd_rem(merged, merged_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree, big_border):
sol_len = len(solution_costs)
ccs_mndgr = calculate_mean_nodedgr_nd(merged, merged_sizes, node_dgree)
ccs = ccs_mndgr[0]
mean_ndgree = ccs_mndgr[1]
connectivity = np.zeros(sol_len, dtype=np.int8)
for s_center_i in range(len(ccs)):
# s_center soll klein genug sein
s_center = ccs[s_center_i]
if merged_sizes[s_center] > mean_ndgree[s_center_i] * big_border:
continue
# Detektiere und verbinde "Mini-Cluster" (Wurzel des Clusters soll verbunden werden).
best_fit = s_center
min_mwc = 1.7976931348623157e+308
for b_center_i in range(len(ccs)):
# b_center soll groß genug sein
b_center = ccs[b_center_i]
if merged_sizes[b_center] <= mean_ndgree[b_center_i] * big_border:
continue
# Falls Cluster zusammen deutlich zu groß wären, überspringt diese Kombination direkt.
# zu groß: mehr als 0.29 zusätzlich
# wegen 2/9 Fehlerrate maximal die von den 7/9 übrigen Kanten jeweils fehlen darf.
if merged_sizes[s_center] + merged_sizes[b_center] > 1.29 * mean_ndgree[b_center_i]:
continue
for x in range(0,sol_len):
if parents[x, s_center] == parents[x, b_center]:
connectivity[x] = 1
else:
connectivity[x] = 0
# Berechne Gewicht:
mwc = mean_weight_connected(s_center, connectivity, vertex_costs, sizes, parents)
# Aktualisiere ggf. best-passenden Knoten und minimalen mwc
if mwc == -1:
continue
if mwc < min_mwc:
min_mwc = mwc
best_fit = b_center
# Verbinde das Cluster mit dem Cluster, das im Mittel für s_center am günstigsten ist.
rem_union(s_center, best_fit, merged)
# Wg. Rem: aktualisiere Größe direkt in Repräsentanten von später erneut betrachtetem best_fit
merged_sizes[best_fit] += merged_sizes[s_center]
return merged
@njit
def calculate_mean_nodedgr_array(merged, merged_sizes, node_dgree, cluster_centers):
cluster_mean_nodedgr = np.zeros(len(cluster_centers), dtype=np.int64)
for c in range(len(cluster_centers)):
for i in range(len(merged)):
if merged[i] == cluster_centers[c]:
cluster_mean_nodedgr[c] += node_dgree[i]
cluster_mean_nodedgr[c] /= merged_sizes[cluster_centers[c]]
cmn_array = np.zeros(len(merged), dtype=np.int64)
for i in range(len(cluster_centers)):
c = cluster_centers[i]
cmn_array[c] = cluster_mean_nodedgr[i]
return cmn_array
def repair_merged_v4_rem_scan(merged, merged_sizes, solution_costs, vertex_costs, parents, sizes, n, node_dgree, big_border, filename):
sol_len = len(solution_costs)
    cluster_centers = pd.unique(merged)
import pytest
import pandas as pd
import pypipegraph as ppg
from mbf_genomics import genes, DelayedDataFrame
from mbf_genomics.testing import MockGenome
from pypipegraph.testing import force_load
from pathlib import Path
@pytest.mark.usefixtures("new_pipegraph")
class TestDescription:
def test_simple(self):
genome = MockGenome(
pd.DataFrame(
{
"stable_id": ["a", "b", "c"],
"chr": "1",
"tss": [0, 100, 1000],
"tes": [10, 101, 1010],
}
),
df_genes_meta=pd.DataFrame(
{
"gene_stable_id": ["a", "b", "c"],
"description": ["hello", "world", "!"],
}
).set_index("gene_stable_id"),
)
g = genes.Genes(genome)
anno = genes.annotators.Description()
g += anno
force_load(g.annotate())
ppg.run_pipegraph()
assert "description" in g.df.columns
assert (
g.df.sort_values("gene_stable_id")["description"] == ["hello", "world", "!"]
).all()
def test_external_genome(self):
genome = MockGenome(
pd.DataFrame(
{
"stable_id": ["a", "b", "c"],
"chr": "1",
"tss": [0, 100, 1000],
"tes": [10, 101, 1010],
}
),
df_genes_meta=pd.DataFrame(
{
"gene_stable_id": ["a", "b", "c"],
"description": ["hello", "world", "!"],
}
).set_index("gene_stable_id"),
)
g = DelayedDataFrame("ex", | pd.DataFrame({"gene_stable_id": ["a", "c", "b"]}) | pandas.DataFrame |
# -*- encoding:utf-8 -*-
"""
Middle layer: receives x, y and df from the upper layer,
and owns estimator creation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import functools
from enum import Enum
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, ClassifierMixin, RegressorMixin, clone
from sklearn import metrics
from sklearn.datasets import load_iris
from sklearn.feature_selection import RFE, VarianceThreshold
from sklearn.preprocessing import label_binarize, StandardScaler, binarize
from . import ABuMLExecute
from .ABuMLCreater import AbuMLCreater
from ..CoreBu import ABuEnv
from ..CoreBu.ABuFixes import train_test_split, cross_val_score, mean_squared_error_scorer, six
from ..UtilBu import ABuFileUtil
from ..UtilBu.ABuProgress import AbuProgress
from ..UtilBu.ABuDTUtil import warnings_filter
from ..UtilBu.ABuDTUtil import params_to_numpy
from ..CoreBu.ABuFixes import signature
__author__ = '阿布'
__weixin__ = 'abu_quant'
p_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir))
ML_TEST_FILE = os.path.join(p_dir, 'RomDataBu/ml_test.csv')
class _EMLScoreType(Enum):
"""针对有监督学习的度量支持enum"""
"""有监督学习度量准确率"""
E_SCORE_ACCURACY = 'accuracy'
"""有监督学习度量mse"""
E_SCORE_MSE = mean_squared_error_scorer
"""有监督学习度量roc_auc"""
E_SCORE_ROC_AUC = 'roc_auc'
class EMLFitType(Enum):
"""支持常使用的学习器类别enum"""
"""有监督学习:自动选择,根据y的label数量,> 10使用回归否则使用分类"""
E_FIT_AUTO = 'auto'
"""有监督学习:回归"""
E_FIT_REG = 'reg'
"""有监督学习:分类"""
E_FIT_CLF = 'clf'
"""无监督学习:HMM"""
E_FIT_HMM = 'hmm'
"""无监督学习:PCA"""
E_FIT_PCA = 'pca'
"""无监督学习:KMEAN"""
E_FIT_KMEAN = 'kmean'
def entry_wrapper(support=(EMLFitType.E_FIT_CLF, EMLFitType.E_FIT_REG, EMLFitType.E_FIT_HMM,
EMLFitType.E_FIT_PCA, EMLFitType.E_FIT_KMEAN)):
"""
    Class decorator function that normalises the fiter_type keyword argument: e.g. if the
    fiter_type argument is 'clf', it is converted to EMLFitType(fiter_type) and assigned to
    self.fiter_type. If the learner type actually in use is not contained in the support
    parameter, the decorated func is not executed; a log line is written and the call returns.
    :param support: default support=(EMLFitType.E_FIT_CLF, EMLFitType.E_FIT_REG, EMLFitType.E_FIT_HMM,
                             EMLFitType.E_FIT_PCA, EMLFitType.E_FIT_KMEAN)
                    i.e. all types are supported; each decorated function chooses the decorator
                    parameters that match its own characteristics
"""
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
org_fiter_type = self.fiter_type
if 'fiter_type' in kwargs:
                # If a fiter_type argument was passed, pop it out of kwargs
fiter_type = kwargs.pop('fiter_type')
                # If the fiter_type argument is a str, e.g. 'clf', convert it to EMLFitType(fiter_type)
if isinstance(fiter_type, six.string_types):
fiter_type = EMLFitType(fiter_type)
self.fiter_type = fiter_type
check_support = self.fiter_type
if self.fiter_type == EMLFitType.E_FIT_AUTO:
                # Resolve 'auto' to concrete classification or regression
check_y = self.y
if 'y' in kwargs:
check_y = kwargs['y']
check_support = EMLFitType.E_FIT_CLF if len(np.unique(check_y)) <= 10 else EMLFitType.E_FIT_REG
if check_support not in support:
                # The learner type in use is not in the support parameter: skip the decorated func, log and return
self.log_func('{} not support {}!'.format(func.__name__, check_support.value))
                # The call was not executed, so switch the type back
self.fiter_type = org_fiter_type
return
return func(self, *args, **kwargs)
return wrapper
return decorate
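# Small, guarded illustration of the fiter_type normalisation that entry_wrapper performs
# (purely illustrative; nothing here runs on import):
if __name__ == '__main__':
    print(EMLFitType('clf') is EMLFitType.E_FIT_CLF)  # True: string values map onto the enum
    print(EMLFitType('auto'))                         # EMLFitType.E_FIT_AUTO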
# noinspection PyUnresolvedReferences
class AbuML(object):
"""封装有简单学习及无监督学习方法以及相关操作类"""
@classmethod
def create_test_fiter(cls):
"""
        Class method: build an AbuML object from the iris data to test the interface and to
        verify methods and strategies on the simple iris data. The iris data set is small; if
        more data is needed for interface tests, use the create_test_more_fiter interface.
eg: iris_abu = AbuML.create_test_fiter()
:return: AbuML(x, y, df),
eg: df
y x0 x1 x2 x3
0 0 5.1 3.5 1.4 0.2
1 0 4.9 3.0 1.4 0.2
2 0 4.7 3.2 1.3 0.2
3 0 4.6 3.1 1.5 0.2
4 0 5.0 3.6 1.4 0.2
.. .. ... ... ... ...
145 2 6.7 3.0 5.2 2.3
146 2 6.3 2.5 5.0 1.9
147 2 6.5 3.0 5.2 2.0
148 2 6.2 3.4 5.4 2.3
149 2 5.9 3.0 5.1 1.8
"""
iris = load_iris()
x = iris.data
"""
eg: iris.data
array([[ 5.1, 3.5, 1.4, 0.2],
[ 4.9, 3. , 1.4, 0.2],
[ 4.7, 3.2, 1.3, 0.2],
[ 4.6, 3.1, 1.5, 0.2],
[ 5. , 3.6, 1.4, 0.2],
....... ....... .......
[ 6.7, 3. , 5.2, 2.3],
[ 6.3, 2.5, 5. , 1.9],
[ 6.5, 3. , 5.2, 2. ],
[ 6.2, 3.4, 5.4, 2.3],
[ 5.9, 3. , 5.1, 1.8]])
"""
y = iris.target
"""
eg: y
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
"""
x_df = pd.DataFrame(x, columns=['x0', 'x1', 'x2', 'x3'])
y_df = pd.DataFrame(y, columns=['y'])
df = y_df.join(x_df)
return AbuML(x, y, df)
@classmethod
def load_ttn_raw_df(cls):
"""
        Read the Titanic test data
        :return: pd.DataFrame object, from pd.read_csv(train_csv_path)
"""
train_csv_path = ML_TEST_FILE
if not ABuFileUtil.file_exist(train_csv_path):
            # Raise a RuntimeError if the Titanic data file does not exist
raise RuntimeError('{} not exist, please down a ml_test.csv!'.format(train_csv_path))
        # The training file is read from disk with read_csv
return pd.read_csv(train_csv_path)
@classmethod
@warnings_filter
def create_test_more_fiter(cls):
"""
类方法:使用泰坦尼克数据构造AbuML对象,测试接口,对方法以及策略进行验证 比iris数据多
eg: ttn_abu = AbuML.create_test_more_fiter()
:return: AbuML(x, y, df),构造AbuML最终的泰坦尼克数据形式如:
eg: df
Survived SibSp Parch Cabin_No Cabin_Yes Embarked_C Embarked_Q \
0 0 1 0 1 0 0 0
1 1 1 0 0 1 1 0
2 1 0 0 1 0 0 0
3 1 1 0 0 1 0 0
4 0 0 0 1 0 0 0
5 0 0 0 1 0 0 1
6 0 0 0 0 1 0 0
7 0 3 1 1 0 0 0
8 1 0 2 1 0 0 0
9 1 1 0 1 0 1 0
.. ... ... ... ... ... ... ...
Embarked_S Sex_female Sex_male Pclass_1 Pclass_2 Pclass_3 \
0 1 0 1 0 0 1
1 0 1 0 1 0 0
2 1 1 0 0 0 1
3 1 1 0 1 0 0
4 1 0 1 0 0 1
5 0 0 1 0 0 1
6 1 0 1 1 0 0
7 1 0 1 0 0 1
8 1 1 0 0 0 1
9 0 1 0 0 1 0
.. ... ... ... ... ... ...
Age_scaled Fare_scaled
0 -0.5614 -0.5024
1 0.6132 0.7868
2 -0.2677 -0.4889
3 0.3930 0.4207
4 0.3930 -0.4863
5 -0.4271 -0.4781
6 1.7877 0.3958
7 -2.0295 -0.2241
8 -0.1943 -0.4243
.. ... ...
"""
raw_df = cls.load_ttn_raw_df()
def set_missing_ages(p_df):
"""
            Fill the missing ages in the data using a RandomForestRegressor
"""
from sklearn.ensemble import RandomForestRegressor
age_df = p_df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
known_age = age_df[age_df.Age.notnull()].as_matrix()
unknown_age = age_df[age_df.Age.isnull()].as_matrix()
y_inner = known_age[:, 0]
x_inner = known_age[:, 1:]
rfr_inner = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr_inner.fit(x_inner, y_inner)
predicted_ages = rfr_inner.predict(unknown_age[:, 1::])
p_df.loc[(p_df.Age.isnull()), 'Age'] = predicted_ages
return p_df, rfr_inner
def set_cabin_type(p_df):
"""
            Handle the missing Cabin values
"""
p_df.loc[(p_df.Cabin.notnull()), 'Cabin'] = "Yes"
p_df.loc[(p_df.Cabin.isnull()), 'Cabin'] = "No"
return p_df
raw_df, rfr = set_missing_ages(raw_df)
raw_df = set_cabin_type(raw_df)
        # One-hot encode the multi-valued label columns with get_dummies
        dummies_cabin = pd.get_dummies(raw_df['Cabin'], prefix='Cabin')
from unittest.mock import patch
import featuretools as ft
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from blocktorch.pipelines.components import DFSTransformer
def test_index_errors(X_y_binary):
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=0)
with pytest.raises(TypeError, match="Index provided must be string"):
DFSTransformer(index=None)
def test_numeric_columns(X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
feature = DFSTransformer()
feature.fit(X_pd, y)
feature.transform(X_pd)
@patch("blocktorch.pipelines.components.transformers.preprocessing.featuretools.dfs")
@patch(
"blocktorch.pipelines.components.transformers.preprocessing.featuretools.calculate_feature_matrix"
)
def test_featuretools_index(mock_calculate_feature_matrix, mock_dfs, X_y_multi):
X, y = X_y_multi
X_pd = pd.DataFrame(X)
X_new_index = X_pd.copy()
index = [i for i in range(len(X))]
new_index = [i * 2 for i in index]
X_new_index["index"] = new_index
mock_calculate_feature_matrix.return_value = pd.DataFrame({})
# check if _make_entity_set keeps the intended index
feature = DFSTransformer()
feature.fit(X_new_index)
feature.transform(X_new_index)
arg_es = mock_dfs.call_args[1]["entityset"].entities[0].df["index"]
arg_tr = (
mock_calculate_feature_matrix.call_args[1]["entityset"].entities[0].df["index"]
)
assert arg_es.to_list() == new_index
assert arg_tr.to_list() == new_index
# check if _make_entity_set fills in the proper index values
feature.fit(X_pd)
feature.transform(X_pd)
arg_es = mock_dfs.call_args[1]["entityset"].entities[0].df["index"]
arg_tr = (
mock_calculate_feature_matrix.call_args[1]["entityset"].entities[0].df["index"]
)
assert arg_es.to_list() == index
assert arg_tr.to_list() == index
def test_transform(X_y_binary, X_y_multi, X_y_regression):
datasets = locals()
for dataset in datasets.values():
X, y = dataset
X_pd = pd.DataFrame(X)
X_pd.columns = X_pd.columns.astype(str)
es = ft.EntitySet()
es = es.entity_from_dataframe(
entity_id="X", dataframe=X_pd, index="index", make_index=True
)
feature_matrix, features = ft.dfs(entityset=es, target_entity="X")
feature = DFSTransformer()
feature.fit(X)
X_t = feature.transform(X)
assert_frame_equal(feature_matrix, X_t)
assert features == feature.features
feature.fit(X, y)
feature.transform(X)
X_pd.ww.init()
feature.fit(X_pd)
feature.transform(X_pd)
def test_transform_subset(X_y_binary, X_y_multi, X_y_regression):
datasets = locals()
for dataset in datasets.values():
X, y = dataset
X_pd = pd.DataFrame(X)
X_pd.columns = X_pd.columns.astype(str)
X_fit = X_pd.iloc[: len(X) // 3]
X_transform = X_pd.iloc[len(X) // 3 :]
es = ft.EntitySet()
es = es.entity_from_dataframe(
entity_id="X", dataframe=X_transform, index="index", make_index=True
)
feature_matrix, features = ft.dfs(entityset=es, target_entity="X")
feature = DFSTransformer()
feature.fit(X_fit)
X_t = feature.transform(X_transform)
assert_frame_equal(feature_matrix, X_t)
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(
pd.to_datetime(["20190902", "20200519", "20190607"], format="%Y%m%d")
),
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype="float")),
pd.DataFrame(pd.Series(["a", "b", "a"], dtype="category")),
],
)
def test_ft_woodwork_custom_overrides_returned_by_components(X_df):
    y = pd.Series([1, 2, 1])
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 16:15:24 2017
Sponsors Club messaging functions
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import re
import math
import textwrap
from tkinter import filedialog
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pkg.SC_signup_functions import findcards
from openpyxl import load_workbook
import pkg.SC_config as cnf
def emailparent_tk(teams, season, year):
    ''' Interface for non-billing email messages to parents (non-generic)
Message types include:
recruit - specific inquiry about player from last year not yet signed up; needs signupfile w/ recruits tab
assign - notify of team assignment, optional recruit for short team, CYC card notify; teams/cards/mastersignups
missinguni - ask about missing uniforms; missingunifile
unireturn - generic instructions for uniform return; mastersignups w/ unis issued
askforcards - check for CYC card on file and ask
other -- Generic single all team+coaches message (can have $SCHOOL, $GRADERANGE,$COACHINFO, $SPORT, $PLAYERLIST)
8/9/17 works for team assignments
TODO test recruit, missing unis, unireturn
args:
teams - df w/ active teams
season -'Winter', 'Fall' or 'Spring'
year - starting sport year i.e. 2019 for 2019-20 school year
'''
#%%
# first print out existing info in various lines
root = tk.Tk()
root.title('Send e-mail to parents')
messageframe=tk.LabelFrame(root, text='Message options')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
recruitbool=tk.BooleanVar() # optional recruiting for short teams
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
transmessfile=tk.StringVar() # text of e-mail message for transfers
extravar=tk.StringVar() # use depends on message type... normally filename
extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
extraname.set('Extra_file_name.txt') # default starting choice
choice=tk.StringVar() # test or send -mail
def chooseFile(txtmess, ftypes):
''' tkinter file chooser (passes message string for window and expected
file types as tuple e.g. ('TXT','*.txt')
'''
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = txtmess, filetypes=[ ftypes] )
root.destroy() # closes pop up window
return full_path
def choose_message():
# choose existing message (.txt file)
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = 'Choose message file', filetypes=[ ('TXT','*.txt')] )
root.destroy() # closes pop up window
return full_path
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Assignopts():
''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
extraname.set('n/a')
messfile.set('parent_team_assignment.txt')
transmessfile.set('parent_team_transfer.txt')
emailtitle.set('Fall $SPORT for $FIRST')
def Recruitopts():
''' Display relevant choices for specific player recruiting'''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
messfile.set('player_recruiting.txt')
transmessfile.set('n/a')
extraname.set('n/a')
emailtitle.set('Cabrini-Soulard sports for $FIRST this fall?')
def Missingopts():
''' Display relevant choices for ask parent for missing uniforms '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('finish_me.txt')
transmessfile.set('n/a')
extraname.set('Missing uni file name')
extravar.set('missing_uni.csv')
# TODO look up most recent uni file?
emailtitle.set("Please return $FIRST's $SPORT uniform!")
def Schedopts():
''' Display relevant choices for sending schedules (game and practice) to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('parent_game_schedule.txt')
transmessfile.set('n/a')
extraname.set('Game schedule file')
extravar.set('Cabrini_2017_schedule.csv')
emailtitle.set("Game schedule for Cabrini $GRADERANGE $GENDER $SPORT")
def Cardopts():
''' Display relevant choices for asking parent for missing CYC cards '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.DISABLED)
messfile.set('CYCcard_needed.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("CYC card needed for $FIRST")
def Otheropts():
''' Display relevant choices for other generic message to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
def Allopts():
''' Display relevant choices for generic message to all sports parents '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
# E-mail title and message file name
rownum=0
tk.Label(messageframe, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(messageframe, textvariable=emailtitle)
titleentry.config(width=50)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(messageframe, textvariable=messfile)
messentry.config(width=50)
messentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='Transfer messagefile').grid(row=rownum, column=0)
transmessentry=tk.Entry(messageframe, textvariable=transmessfile)
transmessentry.config(width=50)
transmessentry.grid(row=rownum, column=1)
rownum+=1
# Choose counts, deriv, both or peaks plot
tk.Radiobutton(messageframe, text='Team assignment', value='Assign', variable = mtype, command=Assignopts).grid(row=rownum, column=0)
tk.Radiobutton(messageframe, text='Recruit missing', value='Recruit', variable = mtype, command=Recruitopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Missing uni', value='Missing', variable = mtype, command=Missingopts).grid(row=rownum, column=2)
tk.Radiobutton(messageframe, text='Send schedule', value='Schedule', variable = mtype, command=Schedopts).grid(row=rownum, column=3)
rownum+=1
tk.Radiobutton(messageframe, text='Ask for cards', value='Cards', variable = mtype, command=Cardopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Other team message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='All sport parents', value='All', variable = mtype, command=Allopts).grid(row=rownum, column=2)
rownum+=1
tk.Label(messageframe, text=extraname.get()).grid(row=rownum, column=0)
extraentry=tk.Entry(messageframe, textvariable=extravar)
extraentry.grid(row=rownum, column=1)
# Extra file chooser button
# button arg includes file type extension .. get from messfile
try:
ft = extraname.get().split('.')[-1]
ftypes =("%s" %ft.upper(), "*.%s" %ft)
except:
ftypes =("CSV" , "*.*") # default to all files
    # wrap the dialog call in a lambda so it opens on click rather than at widget-creation time
    d=tk.Button(messageframe, text='Choose file', command=lambda: chooseFile('Choose extra file', ftypes))
d.grid(row=rownum, column=2)
recruitcheck=tk.Checkbutton(messageframe, variable=recruitbool, text='Recruit more players for short teams?')
recruitcheck.grid(row=rownum, column=3) # can't do immediate grid or nonetype is returned
rownum+=1
messageframe.grid(row=0, column=0)
# Specific team selector section using checkboxes
teamframe=tk.LabelFrame(root, text='Team selector')
teamdict=shortnamedict(teams)
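    # teamdict maps each team key to the short display name used for the checkbox labels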
teamlist=[] # list of tk bools for each team
# Make set of bool/int variables for each team
for i, val in enumerate(teamdict):
teamlist.append(tk.IntVar())
if '#' not in val:
teamlist[i].set(1) # Cabrini teams checked by default
else:
teamlist[i].set(0) # transfer team
# make checkbuttons for each team
for i, val in enumerate(teamdict):
        thisrow=i%5+1+rownum # five checkboxes per column
thiscol=i//5
thisname=teamdict.get(val,'')
tk.Checkbutton(teamframe, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
rownum+=math.ceil(len(teamlist)/5)+2
# Decision buttons bottom row
def chooseall(event):
''' Select all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(1)
def clearall(event):
''' deselect all teams '''
for i, val in enumerate(teamdict):
teamlist[i].set(0)
def abort(event):
choice.set('abort')
root.destroy()
def test(event):
choice.set('test')
root.destroy()
def KCtest(event):
choice.set('KCtest')
root.destroy()
def send(event):
choice.set('send')
root.destroy()
rownum+=1
d=tk.Button(teamframe, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(teamframe, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
teamframe.grid(row=1, column=0)
choiceframe=tk.LabelFrame(root)
d=tk.Button(choiceframe, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(choiceframe, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(choiceframe, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(choiceframe, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
choiceframe.grid(row=2, column=0)
root.mainloop()
#%%
mychoice=choice.get()
if mychoice!='abort':
kwargs={}
if mychoice=='KCtest':
# this is a true send test but only to me
kwargs.update({'KCtest':True})
mychoice='send'
kwargs.update({'choice':mychoice}) # test or send
emailtitle=emailtitle.get()
messagefile='messages\\'+messfile.get()
# Handle selection of team subsets
selteams=[]
for i, val in enumerate(teamdict):
if teamlist[i].get()==1:
selteams.append(val)
# Filter teams based on checkbox input
teams=teams[teams['Team'].isin(selteams)]
# drop duplicates in case of co-ed team (m and f entries)
teams=teams.drop_duplicates(['Team','Sport'])
# Now deal with the different types of messages
#%%
if mtype.get()=='Schedule':
# Send practice and game schedules
try:
sched=pd.read_csv(extravar.get())
except:
print('Problem opening schedule and other required files for sending game schedules')
fname=filedialog.askopenfilename(title='Select schedule file.')
sched=pd.read_csv(fname)
# fields=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Fields')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches')
coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
# INTERNAL TESTING
# Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
with open(messagefile, 'r') as file:
blankmess=file.read()
# open and send master CYC schedule
sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs)
if mtype.get()=='Recruit':
try:
famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
except:
print('Problem loading family contacts')
try: # Recruits stored in CSV
Recruits=pd.read_csv(cnf._OUTPUT_DIR+'\\%s%s_recruits.csv' %(season, year))
print('Loaded possible recruits from csv file')
except:
fname=filedialog.askopenfilename(title='Select recruits file.')
if fname.endswith('.csv'): # final move is query for file
Recruits=pd.read_csv(fname)
else:
print('Recruits file needed in csv format.')
return
emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs)
if mtype.get()=='Assign':
# Notify parents needs teams, mastersignups, famcontacts
if recruitbool.get():
kwargs.update({'recruit':True})
try:
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
                coaches= pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv', encoding='cp437')
import os
import json
CONFIG_LOCATION = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, "data", "path_config.json"))
with open(CONFIG_LOCATION) as _json_file:
CONFIG = json.load(_json_file)
DATA_DIR = CONFIG["main_data_dir"]
if not os.path.exists(DATA_DIR):
PROJECT_ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(PROJECT_ROOT_PATH, "data")
os.environ["S2AND_CACHE"] = os.path.join(DATA_DIR, ".feature_cache")
os.environ["OMP_NUM_THREADS"] = "8"
import copy
import argparse
import logging
import pickle
from typing import Dict, Any, Optional, List
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.cluster import DBSCAN
from sklearn.linear_model import BayesianRidge, LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.impute import IterativeImputer
from sklearn.pipeline import make_pipeline
from s2and.data import ANDData
from s2and.featurizer import featurize, FeaturizationInfo
from s2and.model import PairwiseModeler, Clusterer, FastCluster
from s2and.eval import pairwise_eval, cluster_eval, facet_eval
from s2and.consts import FEATURIZER_VERSION, DEFAULT_CHUNK_SIZE, NAME_COUNTS_PATH
from s2and.file_cache import cached_path
from s2and.plotting_utils import plot_facets
from hyperopt import hp
logger = logging.getLogger("s2and")
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
PAIRWISE_ONLY_DATASETS = {"medline"}
BLOCK_TYPE = "s2"
N_VAL_TEST_SIZE = 10000
N_ITER = 25
PREPROCESS = True
def transfer_helper(
source_dataset,
target_dataset,
experiment_name,
random_seed,
featurizer_info,
nameless_featurizer_info,
use_s2_clusters=False,
skip_shap=False,
):
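    # evaluate source_dataset's models on target_dataset, write the metrics to a per-seed JSON file, and return them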
source_name = source_dataset["name"]
target_name = target_dataset["name"]
if use_s2_clusters:
pairwise_metrics = {
"AUROC": None,
"Average Precision": None,
"F1": None,
"Precision": None,
"Recall": None,
}
else:
pairwise_metrics = pairwise_eval(
target_dataset["X_test"],
target_dataset["y_test"],
source_dataset["pairwise_modeler"],
os.path.join(DATA_DIR, "experiments", experiment_name, f"seed_{random_seed}", "figs"),
f"{source_name}_to_{target_name}",
featurizer_info.get_feature_names(),
nameless_classifier=source_dataset["nameless_pairwise_modeler"],
nameless_X=target_dataset["nameless_X_test"],
nameless_feature_names=nameless_featurizer_info.get_feature_names(),
skip_shap=skip_shap,
)
if target_name not in PAIRWISE_ONLY_DATASETS and source_name not in PAIRWISE_ONLY_DATASETS:
cluster_metrics, b3_metrics_per_signature = cluster_eval(
target_dataset["anddata"],
source_dataset["clusterer"],
split="test",
use_s2_clusters=use_s2_clusters,
)
else:
cluster_metrics = {
"B3 (P, R, F1)": (None, None, None),
"Cluster (P, R F1)": (None, None, None),
"Cluster Macro (P, R, F1)": (None, None, None),
"Pred bigger ratio (mean, count)": (None, None),
"True bigger ratio (mean, count)": (None, None),
}
b3_metrics_per_signature = None
if not use_s2_clusters:
metrics = {"pairwise": pairwise_metrics, "cluster": cluster_metrics}
logger.info(f"{source_name}_to_{target_name}: {metrics}")
with open(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
f"seed_{random_seed}",
"metrics",
f"{source_name}_to_{target_name}.json",
),
"w",
) as _json_file:
json.dump(metrics, _json_file, indent=4)
return pairwise_metrics, cluster_metrics, b3_metrics_per_signature
def summary_features_analysis(
union_firstname_f1,
union_affiliation_f1,
union_email_f1,
union_abstract_f1,
union_venue_f1,
union_references_f1,
union_coauthors_f1,
union_s2_firstname_f1,
union_s2_affiliation_f1,
union_s2_email_f1,
union_s2_abstract_f1,
union_s2_venue_f1,
union_s2_references_f1,
union_s2_coauthors_f1,
):
"""
Aggregates differences in performance for s2and and s2,
across different feature availability indicators.
"""
feature_summary = []
for s2and_feature_facet, s2_feature_facet in zip(
[
union_firstname_f1,
union_affiliation_f1,
union_email_f1,
union_abstract_f1,
union_venue_f1,
union_references_f1,
union_coauthors_f1,
],
[
union_s2_firstname_f1,
union_s2_affiliation_f1,
union_s2_email_f1,
union_s2_abstract_f1,
union_s2_venue_f1,
union_s2_references_f1,
union_s2_coauthors_f1,
],
):
s2and_pres_avg = sum(s2and_feature_facet[1]) / len(s2and_feature_facet[1])
s2and_abs_avg = sum(s2and_feature_facet[0]) / len(s2and_feature_facet[0])
s2_pres_avg = sum(s2_feature_facet[1]) / len(s2_feature_facet[1])
s2_abs_avg = sum(s2_feature_facet[0]) / len(s2_feature_facet[0])
feature_summary.append(
[
s2and_pres_avg,
s2and_abs_avg,
s2_pres_avg,
s2_abs_avg,
]
)
s2and_feature_facet_scores = {
"first_name_diff": np.round(feature_summary[0][0], 3) - np.round(feature_summary[0][1], 3),
"affilition_diff": np.round(feature_summary[1][0], 3) - np.round(feature_summary[1][1], 3),
"email_diff": np.round(feature_summary[2][0], 3) - np.round(feature_summary[2][1], 3),
"abstract_diff": np.round(feature_summary[3][0], 3) - np.round(feature_summary[3][1], 3),
"venue_diff": np.round(feature_summary[4][0], 3) - np.round(feature_summary[4][1], 3),
"references_diff": np.round(feature_summary[5][0], 3) - np.round(feature_summary[5][1], 3),
"coauthors_diff": np.round(feature_summary[6][0], 3) - np.round(feature_summary[6][1], 3),
}
s2_feature_facet_scores = {
"first_name_diff": np.round(feature_summary[0][2], 3) - np.round(feature_summary[0][3], 3),
"affilition_diff": np.round(feature_summary[1][2], 3) - np.round(feature_summary[1][3], 3),
"email_diff": np.round(feature_summary[2][2], 3) - np.round(feature_summary[2][3], 3),
"abstract_diff": np.round(feature_summary[3][2], 3) - np.round(feature_summary[3][3], 3),
"venue_diff": np.round(feature_summary[4][2], 3) - np.round(feature_summary[4][3], 3),
"references_diff": np.round(feature_summary[5][2], 3) - np.round(feature_summary[5][3], 3),
"coauthors_diff": np.round(feature_summary[6][2], 3) - np.round(feature_summary[6][3], 3),
}
return s2and_feature_facet_scores, s2_feature_facet_scores
def disparity_analysis(
comb_s2and_facet_f1,
comb_s2_facet_f1,
):
"""
Studying disparity for different gender and ethnicity groups.
Metric 1: Standard deviation
Metric 2: Sum of difference from privileged group performance (found by max performance)
    Also finds the best and worst performing groups, i.e. the most and least privileged ones.
"""
s2and_f1 = []
s2_f1 = []
keylist = []
for facet, f1 in comb_s2and_facet_f1.items():
# skipping "-" which is present in gender
if facet == "-":
continue
s2and_average = sum(comb_s2and_facet_f1[facet]) / len(comb_s2and_facet_f1[facet])
s2_average = sum(comb_s2_facet_f1[facet]) / len(comb_s2_facet_f1[facet])
keylist.append(facet[0:3])
s2and_f1.append(s2and_average)
s2_f1.append(s2_average)
print("facet", keylist, s2and_f1, s2_f1)
s2and_deviation = np.std(s2and_f1)
s2_deviation = np.std(s2_f1)
s2and_max_performance = max(s2and_f1)
s2_max_performance = max(s2_f1)
s2and_min_performance = min(s2and_f1)
s2_min_performance = min(s2_f1)
s2and_max_group = keylist[s2and_f1.index(s2and_max_performance)]
s2_max_group = keylist[s2_f1.index(s2_max_performance)]
s2and_min_group = keylist[s2and_f1.index(s2and_min_performance)]
s2_min_group = keylist[s2_f1.index(s2_min_performance)]
s2and_sum_deviation = 0
for group_f1 in s2and_f1:
s2and_sum_deviation += s2and_max_performance - group_f1
s2_sum_deviation = 0
for group_f1 in s2_f1:
s2_sum_deviation += s2_max_performance - group_f1
disparity_scores = {
"S2AND std": np.round(s2and_deviation, 3),
"S2 std": np.round(s2_deviation, 3),
"S2AND sum-diff": np.round(s2and_sum_deviation, 3),
"S2 sum-diff": np.round(s2_sum_deviation, 3),
"S2AND max-perf-group": s2and_max_group,
"S2 max-perf-group": s2_max_group,
"S2AND min-perf-group": s2and_min_group,
"S2 min-perf-group": s2_min_group,
"S2AND max-perf": np.round(s2and_max_performance, 3),
"S2 max-perf": np.round(s2_max_performance, 3),
"S2AND min-perf": np.round(s2and_min_performance, 3),
"S2 min-perf": np.round(s2_min_performance, 3),
}
return disparity_scores
def update_facets(
gender_f1,
ethnicity_f1,
author_num_f1,
year_f1,
block_len_f1,
cluster_len_f1,
homonymity_f1,
synonymity_f1,
firstname_f1,
affiliation_f1,
email_f1,
abstract_f1,
venue_f1,
references_f1,
coauthors_f1,
comb_gender_f1,
comb_ethnicity_f1,
comb_author_num_f1,
comb_year_f1,
comb_block_len_f1,
comb_cluster_len_f1,
comb_homonymity_f1,
comb_synonymity_f1,
comb_firstname_f1,
comb_affiliaition_f1,
comb_email_f1,
comb_abstract_f1,
comb_venue_f1,
comb_references_f1,
comb_coauthors_f1,
):
"""
Macro-average over individual facets.
"""
for individual_facet, combined_facet in zip(
[
gender_f1,
ethnicity_f1,
author_num_f1,
year_f1,
block_len_f1,
cluster_len_f1,
homonymity_f1,
synonymity_f1,
firstname_f1,
affiliation_f1,
email_f1,
abstract_f1,
venue_f1,
references_f1,
coauthors_f1,
],
[
comb_gender_f1,
comb_ethnicity_f1,
comb_author_num_f1,
comb_year_f1,
comb_block_len_f1,
comb_cluster_len_f1,
comb_homonymity_f1,
comb_synonymity_f1,
comb_firstname_f1,
comb_affiliaition_f1,
comb_email_f1,
comb_abstract_f1,
comb_venue_f1,
comb_references_f1,
comb_coauthors_f1,
],
):
for key, f1 in individual_facet.items():
combined_facet[key].extend(f1)
return (
comb_gender_f1,
comb_ethnicity_f1,
comb_author_num_f1,
comb_year_f1,
comb_block_len_f1,
comb_cluster_len_f1,
comb_homonymity_f1,
comb_synonymity_f1,
comb_firstname_f1,
comb_affiliaition_f1,
comb_email_f1,
comb_abstract_f1,
comb_venue_f1,
comb_references_f1,
comb_coauthors_f1,
)
def main(
experiment_name: str,
dont_use_nameless_model: bool,
n_jobs: int,
dont_use_monotone_constraints: bool,
exclude_medline: bool,
linkage: str,
use_dbscan: bool,
leave_self_in: bool,
random_seed: int,
skip_individual_models: bool,
skip_union_models: bool,
n_train_pairs_size: int,
feature_groups_to_skip: List[str],
use_linear_pairwise_model: bool,
gender_ethnicity_available: bool,
use_cache: bool,
):
if not os.path.exists(os.path.join(DATA_DIR, "experiments", experiment_name, f"seed_{random_seed}", "metrics")):
os.makedirs(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
f"seed_{random_seed}",
"metrics",
)
)
USE_NAMELESS_MODEL = not dont_use_nameless_model
N_JOBS = n_jobs
USE_MONOTONE_CONSTRAINTS = not dont_use_monotone_constraints
LEAVE_SELF_OUT_FOR_UNION = not leave_self_in
INDIVIDUAL_MODELS = not skip_individual_models
UNION_MODELS = not skip_union_models
N_TRAIN_PAIRS_SIZE = n_train_pairs_size
USE_CACHE = use_cache
logger.info(
(
f"USE_NAMELESS_MODEL={USE_NAMELESS_MODEL}, "
f"N_JOBS={N_JOBS}, "
f"USE_MONOTONE_CONSTRAINTS={USE_MONOTONE_CONSTRAINTS}, "
f"exclude_medline={exclude_medline}, "
f"linkage={linkage}, "
f"use_dbscan={use_dbscan}, "
f"LEAVE_SELF_OUT_FOR_UNION={LEAVE_SELF_OUT_FOR_UNION}, "
f"random_seed={random_seed}, "
f"INDIVIDUAL_MODELS={INDIVIDUAL_MODELS}, "
f"UNION_MODELS={UNION_MODELS}, "
f"N_TRAIN_PAIRS_SIZE={N_TRAIN_PAIRS_SIZE}, "
f"feature_groups_to_skip={feature_groups_to_skip}, "
f"use_linear_pairwise_model={use_linear_pairwise_model}, "
f"USE_CACHE={USE_CACHE}, "
)
)
FEATURES_TO_USE = [
"name_similarity",
"affiliation_similarity",
"email_similarity",
"coauthor_similarity",
"venue_similarity",
"year_diff",
"title_similarity",
"reference_features",
"misc_features",
"name_counts",
"embedding_similarity",
"journal_similarity",
"advanced_name_similarity",
]
for feature_group in feature_groups_to_skip:
FEATURES_TO_USE.remove(feature_group)
NAMELESS_FEATURES_TO_USE = [
feature_name
for feature_name in FEATURES_TO_USE
if feature_name not in {"name_similarity", "advanced_name_similarity", "name_counts"}
]
FEATURIZER_INFO = FeaturizationInfo(features_to_use=FEATURES_TO_USE, featurizer_version=FEATURIZER_VERSION)
NAMELESS_FEATURIZER_INFO = FeaturizationInfo(
features_to_use=NAMELESS_FEATURES_TO_USE, featurizer_version=FEATURIZER_VERSION
)
SOURCE_DATASET_NAMES = [
"aminer",
"arnetminer",
"inspire",
"kisti",
"pubmed",
"qian",
"zbmath",
]
TARGET_DATASET_NAMES = [
"aminer",
"arnetminer",
"inspire",
"kisti",
"pubmed",
"qian",
"zbmath",
]
DATASETS_FOR_UNION: List[str] = [
"aminer",
"arnetminer",
"inspire",
"kisti",
"pubmed",
"qian",
"zbmath",
]
if not exclude_medline:
SOURCE_DATASET_NAMES.append("medline")
TARGET_DATASET_NAMES.append("medline")
DATASETS_FOR_UNION.append("medline")
MONOTONE_CONSTRAINTS = FEATURIZER_INFO.lightgbm_monotone_constraints
NAMELESS_MONOTONE_CONSTRAINTS = NAMELESS_FEATURIZER_INFO.lightgbm_monotone_constraints
NAN_VALUE = np.nan
cluster_search_space: Dict[str, Any] = {
"eps": hp.uniform("choice", 0, 1),
}
pairwise_search_space: Optional[Dict[str, Any]] = None
estimator: Any = None
if use_linear_pairwise_model:
estimator = make_pipeline(
StandardScaler(),
IterativeImputer(
max_iter=20,
random_state=random_seed,
estimator=BayesianRidge(),
skip_complete=True,
add_indicator=True,
n_nearest_features=10,
verbose=0,
),
LogisticRegressionCV(
Cs=[0.01, 0.1, 1.0, 10],
solver="saga",
random_state=random_seed,
n_jobs=N_JOBS,
verbose=0,
max_iter=10000,
tol=1e-3,
),
)
pairwise_search_space = {}
if UNION_MODELS:
DATASETS_TO_TRAIN = set(SOURCE_DATASET_NAMES).union(set(TARGET_DATASET_NAMES)).union(set(DATASETS_FOR_UNION))
else:
DATASETS_TO_TRAIN = set(SOURCE_DATASET_NAMES).union(set(TARGET_DATASET_NAMES))
if LEAVE_SELF_OUT_FOR_UNION:
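        # train one union model per target dataset, leaving that target out of its own union training set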
UNION_DATASETS_TO_TRAIN = set()
for dataset_name in TARGET_DATASET_NAMES:
one_left_out_dataset = set(DATASETS_FOR_UNION) - {dataset_name}
UNION_DATASETS_TO_TRAIN.add(tuple(sorted(list(one_left_out_dataset))))
else:
UNION_DATASETS_TO_TRAIN = {tuple(DATASETS_FOR_UNION)}
logger.info("starting transfer experiment main, loading name counts")
with open(cached_path(NAME_COUNTS_PATH), "rb") as f:
(
first_dict,
last_dict,
first_last_dict,
last_first_initial_dict,
) = pickle.load(f)
name_counts = {
"first_dict": first_dict,
"last_dict": last_dict,
"first_last_dict": first_last_dict,
"last_first_initial_dict": last_first_initial_dict,
}
logger.info("loaded name counts")
datasets: Dict[str, Any] = {}
for dataset_name in tqdm(DATASETS_TO_TRAIN, desc="Processing datasets and fitting base models"):
logger.info("")
logger.info(f"processing dataset {dataset_name}")
clusters_path: Optional[str] = None
if dataset_name not in PAIRWISE_ONLY_DATASETS:
clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + "_clusters.json")
train_pairs_path = None
val_pairs_path = None
test_pairs_path = None
else:
train_pairs_path = os.path.join(DATA_DIR, dataset_name, "train_pairs.csv")
val_pairs_path = os.path.join(DATA_DIR, dataset_name, "val_pairs.csv")
if not os.path.exists(val_pairs_path):
val_pairs_path = None
test_pairs_path = os.path.join(DATA_DIR, dataset_name, "test_pairs.csv")
logger.info(f"loading dataset {dataset_name}")
anddata = ANDData(
signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + "_signatures.json"),
papers=os.path.join(DATA_DIR, dataset_name, dataset_name + "_papers.json"),
name=dataset_name,
mode="train",
specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + "_specter.pickle"),
clusters=clusters_path,
block_type=BLOCK_TYPE,
train_pairs=train_pairs_path,
val_pairs=val_pairs_path,
test_pairs=test_pairs_path,
train_pairs_size=np.maximum(N_TRAIN_PAIRS_SIZE, 100000),
val_pairs_size=N_VAL_TEST_SIZE,
test_pairs_size=N_VAL_TEST_SIZE,
n_jobs=N_JOBS,
load_name_counts=name_counts,
preprocess=PREPROCESS,
random_seed=random_seed,
)
logger.info(f"dataset {dataset_name} loaded")
logger.info(f"featurizing {dataset_name}")
        train, val, test = featurize(  # type: ignore
            anddata,
            FEATURIZER_INFO,
            n_jobs=N_JOBS,
            use_cache=USE_CACHE,
            chunk_size=DEFAULT_CHUNK_SIZE,
            nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,
            nan_value=NAN_VALUE,
        )
X_train, y_train, nameless_X_train = train
# if we sampled more training pairs than required, then we downsample
if len(y_train) > N_TRAIN_PAIRS_SIZE:
np.random.seed(random_seed)
subset_indices = np.random.choice(len(y_train), size=N_TRAIN_PAIRS_SIZE, replace=False)
X_train = X_train[subset_indices, :]
if nameless_X_train is not None:
nameless_X_train = nameless_X_train[subset_indices, :]
y_train = y_train[subset_indices]
X_val, y_val, nameless_X_val = val
assert test is not None
X_test, y_test, nameless_X_test = test
logger.info(f"dataset {dataset_name} featurized")
pairwise_modeler: Optional[PairwiseModeler] = None
nameless_pairwise_modeler = None
cluster: Optional[Clusterer] = None
if INDIVIDUAL_MODELS and dataset_name in SOURCE_DATASET_NAMES:
logger.info(f"fitting pairwise for {dataset_name}")
pairwise_modeler = PairwiseModeler(
n_iter=N_ITER,
estimator=estimator,
search_space=pairwise_search_space,
monotone_constraints=MONOTONE_CONSTRAINTS if USE_MONOTONE_CONSTRAINTS else None,
random_state=random_seed,
)
pairwise_modeler.fit(X_train, y_train, X_val, y_val)
logger.info(f"pairwise fit for {dataset_name}")
if USE_NAMELESS_MODEL:
logger.info(f"nameless fitting pairwise for {dataset_name}")
nameless_pairwise_modeler = PairwiseModeler(
n_iter=N_ITER,
estimator=estimator,
search_space=pairwise_search_space,
monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS if USE_MONOTONE_CONSTRAINTS else None,
random_state=random_seed,
)
nameless_pairwise_modeler.fit(nameless_X_train, y_train, nameless_X_val, y_val)
logger.info(f"nameless pairwise fit for {dataset_name}")
distances_for_sparsity = [1 - pred[1] for pred in pairwise_modeler.predict_proba(X_train)]
threshold = np.percentile(distances_for_sparsity, [10, 20, 30, 40, 50, 60, 70, 80, 90])
logger.info(f"Thresholds {threshold}")
if dataset_name not in PAIRWISE_ONLY_DATASETS:
logger.info(f"fitting clusterer for {dataset_name}")
cluster = Clusterer(
FEATURIZER_INFO,
pairwise_modeler.classifier,
cluster_model=FastCluster(linkage=linkage)
if not use_dbscan
else DBSCAN(min_samples=1, metric="precomputed"),
search_space=cluster_search_space,
n_jobs=N_JOBS,
use_cache=USE_CACHE,
nameless_classifier=nameless_pairwise_modeler.classifier
if nameless_pairwise_modeler is not None
else None,
nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,
random_state=random_seed,
use_default_constraints_as_supervision=False,
)
cluster.fit(anddata)
logger.info(f"clusterer fit for {dataset_name}")
logger.info(f"{dataset_name} best clustering parameters: " + str(cluster.best_params))
dataset: Dict[str, Any] = {}
dataset["anddata"] = anddata
dataset["X_train"] = X_train
dataset["y_train"] = y_train
dataset["X_val"] = X_val
dataset["y_val"] = y_val
dataset["X_test"] = X_test
dataset["y_test"] = y_test
dataset["pairwise_modeler"] = pairwise_modeler
dataset["nameless_X_train"] = nameless_X_train
dataset["nameless_X_val"] = nameless_X_val
dataset["nameless_X_test"] = nameless_X_test
dataset["nameless_pairwise_modeler"] = nameless_pairwise_modeler
dataset["clusterer"] = cluster
dataset["name"] = anddata.name
datasets[dataset_name] = dataset
if UNION_MODELS:
unions = {}
for dataset_name_tuple in tqdm(UNION_DATASETS_TO_TRAIN, desc="Fitting union models..."):
logger.info("")
logger.info("loading dataset for " + str(dataset_name_tuple))
anddatas = [
datasets[dataset_name]["anddata"]
for dataset_name in dataset_name_tuple
if dataset_name not in PAIRWISE_ONLY_DATASETS
]
X_train = np.vstack([datasets[dataset_name]["X_train"] for dataset_name in dataset_name_tuple])
y_train = np.hstack([datasets[dataset_name]["y_train"] for dataset_name in dataset_name_tuple])
X_val = np.vstack([datasets[dataset_name]["X_val"] for dataset_name in dataset_name_tuple])
y_val = np.hstack([datasets[dataset_name]["y_val"] for dataset_name in dataset_name_tuple])
nameless_X_train = np.vstack(
[datasets[dataset_name]["nameless_X_train"] for dataset_name in dataset_name_tuple]
)
nameless_X_val = np.vstack(
[datasets[dataset_name]["nameless_X_val"] for dataset_name in dataset_name_tuple]
)
logger.info("dataset loaded for " + str(dataset_name_tuple))
logger.info("fitting pairwise for " + str(dataset_name_tuple))
union_classifier = PairwiseModeler(
n_iter=N_ITER,
estimator=estimator,
search_space=pairwise_search_space,
monotone_constraints=MONOTONE_CONSTRAINTS if USE_MONOTONE_CONSTRAINTS else None,
random_state=random_seed,
)
union_classifier.fit(X_train, y_train, X_val, y_val)
logger.info("pairwise fit for " + str(dataset_name_tuple))
nameless_union_classifier = None
if USE_NAMELESS_MODEL:
logger.info("nameless fitting pairwise for " + str(dataset_name_tuple))
nameless_union_classifier = PairwiseModeler(
n_iter=N_ITER,
estimator=estimator,
search_space=pairwise_search_space,
monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS if USE_MONOTONE_CONSTRAINTS else None,
random_state=random_seed,
)
nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)
logger.info("nameless pairwise fit for " + str(dataset_name_tuple))
union_clusterer: Optional[Clusterer] = None
if len(anddatas) > 0:
distances_for_sparsity = [1 - pred[1] for pred in union_classifier.predict_proba(X_train)]
threshold = np.percentile(distances_for_sparsity, [10, 20, 30, 40, 50, 60, 70, 80, 90])
logger.info(f"Thresholds {threshold}")
logger.info("fitting clusterer for " + str(dataset_name_tuple))
union_clusterer = Clusterer(
FEATURIZER_INFO,
union_classifier.classifier,
cluster_model=FastCluster(linkage=linkage)
if not use_dbscan
else DBSCAN(min_samples=1, metric="precomputed"),
search_space=cluster_search_space,
n_jobs=N_JOBS,
use_cache=USE_CACHE,
nameless_classifier=nameless_union_classifier.classifier
if nameless_union_classifier is not None
else None,
nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,
random_state=random_seed,
use_default_constraints_as_supervision=False,
)
with open(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
f"seed_{random_seed}",
f"MODEL_{'_'.join(dataset_name_tuple)}.pickle",
),
"wb",
) as _pickle_file:
pickle.dump(union_clusterer, _pickle_file)
union_clusterer.fit(anddatas)
logger.info("clusterer fit for " + str(dataset_name_tuple))
logger.info(f"{dataset_name_tuple} best clustering parameters: " + str(union_clusterer.best_params))
models: Dict[str, Any] = {}
models["pairwise_modeler"] = union_classifier
models["nameless_pairwise_modeler"] = nameless_union_classifier
models["clusterer"] = union_clusterer
models["name"] = "union__" + "_".join(dataset_name_tuple)
unions[dataset_name_tuple] = models
logger.info("")
logger.info("making evaluation grids")
b3_f1_grid = [
["" for j in range(len(TARGET_DATASET_NAMES) + 1)]
for i in range(len(SOURCE_DATASET_NAMES) + 1 + 2 * int(UNION_MODELS))
]
for i in range(max(len(TARGET_DATASET_NAMES), len(SOURCE_DATASET_NAMES))):
if i < len(TARGET_DATASET_NAMES):
b3_f1_grid[0][i + 1] = TARGET_DATASET_NAMES[i]
if i < len(SOURCE_DATASET_NAMES):
b3_f1_grid[i + 1][0] = SOURCE_DATASET_NAMES[i]
if UNION_MODELS:
b3_f1_grid[len(SOURCE_DATASET_NAMES) + 1][0] = "union"
b3_f1_grid[len(SOURCE_DATASET_NAMES) + 2][0] = "s2"
pairwise_auroc_grid = copy.deepcopy(b3_f1_grid) # makes a copy of the grid
true_bigger_ratios_and_counts_grid = copy.deepcopy(b3_f1_grid)
pred_bigger_ratios_and_counts_grid = copy.deepcopy(b3_f1_grid)
# transfer of individual models
if INDIVIDUAL_MODELS:
logger.info("starting individual model evaluation")
for _, source_dataset in tqdm(datasets.items(), desc="Evaluating individual models"):
for _, target_dataset in datasets.items():
if not (source_dataset["name"] in SOURCE_DATASET_NAMES) or (
not target_dataset["name"] in TARGET_DATASET_NAMES
):
continue
logger.info("")
logger.info(f"evaluating source {source_dataset['name']} target {target_dataset['name']}")
pairwise_metrics, cluster_metrics, _ = transfer_helper(
source_dataset,
target_dataset,
experiment_name,
random_seed,
FEATURIZER_INFO,
NAMELESS_FEATURIZER_INFO,
skip_shap=use_linear_pairwise_model, # skip SHAP if not using default model
)
b3_f1_grid[SOURCE_DATASET_NAMES.index(source_dataset["name"]) + 1][
TARGET_DATASET_NAMES.index(target_dataset["name"]) + 1
] = cluster_metrics["B3 (P, R, F1)"][2]
pairwise_auroc_grid[SOURCE_DATASET_NAMES.index(source_dataset["name"]) + 1][
TARGET_DATASET_NAMES.index(target_dataset["name"]) + 1
] = pairwise_metrics["AUROC"]
true_bigger_ratios_and_counts_grid[SOURCE_DATASET_NAMES.index(source_dataset["name"]) + 1][
TARGET_DATASET_NAMES.index(target_dataset["name"]) + 1
] = cluster_metrics["True bigger ratio (mean, count)"]
pred_bigger_ratios_and_counts_grid[SOURCE_DATASET_NAMES.index(source_dataset["name"]) + 1][
TARGET_DATASET_NAMES.index(target_dataset["name"]) + 1
] = cluster_metrics["Pred bigger ratio (mean, count)"]
logger.info(f"finished evaluating source {source_dataset['name']} target {target_dataset['name']}")
logger.info("finished individual model evaluation")
# union
if UNION_MODELS:
union_gender_f1: Dict[str, List] = defaultdict(list)
union_ethnicity_f1: Dict[str, List] = defaultdict(list)
union_author_num_f1: Dict[int, List] = defaultdict(list)
union_year_f1: Dict[int, List] = defaultdict(list)
union_block_len_f1: Dict[int, List] = defaultdict(list)
union_cluster_len_f1: Dict[int, List] = defaultdict(list)
union_homonymity_f1: Dict[int, List] = defaultdict(list)
union_synonymity_f1: Dict[int, List] = defaultdict(list)
# features availability
union_firstname_f1: Dict[str, List] = defaultdict(list)
union_affiliation_f1: Dict[str, List] = defaultdict(list)
union_email_f1: Dict[int, List] = defaultdict(list)
union_abstract_f1: Dict[int, List] = defaultdict(list)
union_venue_f1: Dict[int, List] = defaultdict(list)
union_references_f1: Dict[int, List] = defaultdict(list)
union_coauthors_f1: Dict[int, List] = defaultdict(list)
union_s2_gender_f1: Dict[str, List] = defaultdict(list)
union_s2_ethnicity_f1: Dict[str, List] = defaultdict(list)
union_s2_author_num_f1: Dict[int, List] = defaultdict(list)
union_s2_year_f1: Dict[int, List] = defaultdict(list)
union_s2_block_len_f1: Dict[int, List] = defaultdict(list)
union_s2_cluster_len_f1: Dict[int, List] = defaultdict(list)
union_s2_homonymity_f1: Dict[int, List] = defaultdict(list)
union_s2_synonymity_f1: Dict[int, List] = defaultdict(list)
# features availability
union_s2_firstname_f1: Dict[str, List] = defaultdict(list)
union_s2_affiliation_f1: Dict[str, List] = defaultdict(list)
union_s2_email_f1: Dict[int, List] = defaultdict(list)
union_s2_abstract_f1: Dict[int, List] = defaultdict(list)
union_s2_venue_f1: Dict[int, List] = defaultdict(list)
union_s2_references_f1: Dict[int, List] = defaultdict(list)
union_s2_coauthors_f1: Dict[int, List] = defaultdict(list)
union_signature_wise_facets = defaultdict(list)
union_s2_signature_wise_facets = defaultdict(list)
logger.info("started evaluating unions")
for _, target_dataset in tqdm(datasets.items(), desc="Evaluating union models"):
target_name = target_dataset["name"]
if target_name not in TARGET_DATASET_NAMES:
continue
logger.info("")
logger.info(f"evaluating union for {target_name}")
if LEAVE_SELF_OUT_FOR_UNION:
one_left_out_dataset = set(DATASETS_FOR_UNION) - {target_name}
dataset_name_tuple = tuple(sorted(list(one_left_out_dataset)))
else:
dataset_name_tuple = tuple(DATASETS_FOR_UNION)
source_dataset = unions[dataset_name_tuple]
(pairwise_metrics, cluster_metrics, b3_metrics_per_signature,) = transfer_helper(
source_dataset,
target_dataset,
experiment_name,
random_seed,
FEATURIZER_INFO,
NAMELESS_FEATURIZER_INFO,
skip_shap=use_linear_pairwise_model, # skip SHAP if not using default model
)
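            # repeat the evaluation using the existing Semantic Scholar (S2) clusters as the comparison system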
(s2_pairwise_metrics, s2_cluster_metrics, s2_b3_metrics_per_signature,) = transfer_helper(
source_dataset,
target_dataset,
experiment_name,
random_seed,
FEATURIZER_INFO,
NAMELESS_FEATURIZER_INFO,
use_s2_clusters=True,
skip_shap=use_linear_pairwise_model, # skip SHAP if not using default model
)
if b3_metrics_per_signature is not None:
(
gender_f1,
ethnicity_f1,
author_num_f1,
year_f1,
block_len_f1,
cluster_len_f1,
homonymity_f1,
synonymity_f1,
firstname_f1,
affiliation_f1,
email_f1,
abstract_f1,
venue_f1,
references_f1,
coauthors_f1,
signature_wise_facets,
) = facet_eval(target_dataset["anddata"], b3_metrics_per_signature, BLOCK_TYPE)
union_signature_wise_facets[target_name] = signature_wise_facets
(
union_gender_f1,
union_ethnicity_f1,
union_author_num_f1,
union_year_f1,
union_block_len_f1,
union_cluster_len_f1,
union_homonymity_f1,
union_synonymity_f1,
union_firstname_f1,
union_affiliation_f1,
union_email_f1,
union_abstract_f1,
union_venue_f1,
union_references_f1,
union_coauthors_f1,
) = update_facets(
gender_f1,
ethnicity_f1,
author_num_f1,
year_f1,
block_len_f1,
cluster_len_f1,
homonymity_f1,
synonymity_f1,
firstname_f1,
affiliation_f1,
email_f1,
abstract_f1,
venue_f1,
references_f1,
coauthors_f1,
union_gender_f1,
union_ethnicity_f1,
union_author_num_f1,
union_year_f1,
union_block_len_f1,
union_cluster_len_f1,
union_homonymity_f1,
union_synonymity_f1,
union_firstname_f1,
union_affiliation_f1,
union_email_f1,
union_abstract_f1,
union_venue_f1,
union_references_f1,
union_coauthors_f1,
)
if s2_b3_metrics_per_signature is not None:
(
s2_gender_f1,
s2_ethnicity_f1,
s2_author_num_f1,
s2_year_f1,
s2_block_len_f1,
s2_cluster_len_f1,
s2_homonymity_f1,
s2_synonymity_f1,
s2_firstname_f1,
s2_affiliation_f1,
s2_email_f1,
s2_abstract_f1,
s2_venue_f1,
s2_references_f1,
s2_coauthors_f1,
s2_signature_wise_facets,
) = facet_eval(target_dataset["anddata"], s2_b3_metrics_per_signature, BLOCK_TYPE)
union_s2_signature_wise_facets[target_name] = s2_signature_wise_facets
(
union_s2_gender_f1,
union_s2_ethnicity_f1,
union_s2_author_num_f1,
union_s2_year_f1,
union_s2_block_len_f1,
union_s2_cluster_len_f1,
union_s2_homonymity_f1,
union_s2_synonymity_f1,
union_s2_firstname_f1,
union_s2_affiliation_f1,
union_s2_email_f1,
union_s2_abstract_f1,
union_s2_venue_f1,
union_s2_references_f1,
union_s2_coauthors_f1,
) = update_facets(
s2_gender_f1,
s2_ethnicity_f1,
s2_author_num_f1,
s2_year_f1,
s2_block_len_f1,
s2_cluster_len_f1,
s2_homonymity_f1,
s2_synonymity_f1,
s2_firstname_f1,
s2_affiliation_f1,
s2_email_f1,
s2_abstract_f1,
s2_venue_f1,
s2_references_f1,
s2_coauthors_f1,
union_s2_gender_f1,
union_s2_ethnicity_f1,
union_s2_author_num_f1,
union_s2_year_f1,
union_s2_block_len_f1,
union_s2_cluster_len_f1,
union_s2_homonymity_f1,
union_s2_synonymity_f1,
union_s2_firstname_f1,
union_s2_affiliation_f1,
union_s2_email_f1,
union_s2_abstract_f1,
union_s2_venue_f1,
union_s2_references_f1,
union_s2_coauthors_f1,
)
b3_f1_grid[len(SOURCE_DATASET_NAMES) + 1][TARGET_DATASET_NAMES.index(target_name) + 1] = cluster_metrics[
"B3 (P, R, F1)"
][2]
pairwise_auroc_grid[len(SOURCE_DATASET_NAMES) + 1][
TARGET_DATASET_NAMES.index(target_name) + 1
] = pairwise_metrics["AUROC"]
b3_f1_grid[len(SOURCE_DATASET_NAMES) + 2][TARGET_DATASET_NAMES.index(target_name) + 1] = s2_cluster_metrics[
"B3 (P, R, F1)"
][2]
pairwise_auroc_grid[len(SOURCE_DATASET_NAMES) + 2][
TARGET_DATASET_NAMES.index(target_name) + 1
] = s2_pairwise_metrics["AUROC"]
true_bigger_ratios_and_counts_grid[len(SOURCE_DATASET_NAMES) + 1][
TARGET_DATASET_NAMES.index(target_name) + 1
] = cluster_metrics["True bigger ratio (mean, count)"]
pred_bigger_ratios_and_counts_grid[len(SOURCE_DATASET_NAMES) + 1][
TARGET_DATASET_NAMES.index(target_name) + 1
] = cluster_metrics["Pred bigger ratio (mean, count)"]
logger.info(f"finished evaluating union for {target_name}")
logger.info("finished evaluating unions")
if not os.path.exists(os.path.join(DATA_DIR, "experiments", experiment_name, "facets")):
os.makedirs(os.path.join(DATA_DIR, "experiments", experiment_name, "facets"))
s2and_feature_summary, s2_feature_summary = summary_features_analysis(
union_firstname_f1,
union_affiliation_f1,
union_email_f1,
union_abstract_f1,
union_venue_f1,
union_references_f1,
union_coauthors_f1,
union_s2_firstname_f1,
union_s2_affiliation_f1,
union_s2_email_f1,
union_s2_abstract_f1,
union_s2_venue_f1,
union_s2_references_f1,
union_s2_coauthors_f1,
)
with open(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"union_signature_wise_facets.json",
),
"w",
) as fout:
json.dump(union_signature_wise_facets, fout)
with open(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"union_s2_signature_wise_facets.json",
),
"w",
) as fout:
json.dump(union_s2_signature_wise_facets, fout)
if gender_ethnicity_available:
gender_disparity = disparity_analysis(union_gender_f1, union_s2_gender_f1)
ethnicity_disparity = disparity_analysis(union_ethnicity_f1, union_s2_ethnicity_f1)
logger.info("")
logger.info("disparity analysis")
print("Gender Disparity in F1:")
gender_disparity_df = pd.DataFrame(gender_disparity, index=[0])
print(gender_disparity_df)
print()
print("Ethnicity Disparity in F1:")
ethnicity_disparity_df = pd.DataFrame(ethnicity_disparity, index=[0])
print(ethnicity_disparity_df)
print()
gender_disparity_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"gender_disparity.csv",
),
index=False,
)
ethnicity_disparity_df.to_csv(
os.path.join(
DATA_DIR,
"experiments",
experiment_name,
"ethnicity_disparity.csv",
),
index=False,
)
print("S2AND Feature effect in F1:")
        s2and_feature_df = pd.DataFrame(s2and_feature_summary, index=[0])
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 20:56:31 2019
@author: olegm
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
test = pd.read_csv('test.csv')
train = pd.read_csv('train.csv')
submission = pd.read_csv('gender_submission.csv')
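#the sample submission provides the Survived labels that get merged onto the test set below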
#extracting some data
x_train = train.loc[:, ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]]
#dependant variable
y_train = train.loc [:, "Survived"]
#Filling missing Age values with the column mean
train_age = x_train.loc[:, "Age"]
x_train.loc[:, "Age"] = train_age.fillna(train_age.mean())
encoder = LabelEncoder()
"""
Encoding gender to binary values -> male = 1, female = 0
"""
x_train.loc[:,"Sex"] = encoder.fit_transform(x_train.loc[:,"Sex"])
#Joining the two dataframes together by passenger ID
test_formed = pd.merge(left=test, right=submission, how="left", left_on="PassengerId", right_on="PassengerId")
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from typing import Tuple, Dict
from .template import Processor
from .normalization import CountNormalization
class PlotTaxonBarplots(Processor):
DSTDIR_NAME = 'taxon-barplot'
taxon_table_tsv_dict: Dict[str, str]
n_taxa: int
dstdir: str
def main(
self,
taxon_table_tsv_dict: Dict[str, str],
n_taxa: int):
self.taxon_table_tsv_dict = taxon_table_tsv_dict
self.n_taxa = n_taxa
self.make_dstdir()
for level, tsv in self.taxon_table_tsv_dict.items():
self.plot_one_taxon_barplot(level=level, tsv=tsv)
def make_dstdir(self):
self.dstdir = f'{self.outdir}/{self.DSTDIR_NAME}'
os.makedirs(self.dstdir, exist_ok=True)
def plot_one_taxon_barplot(
self,
level: str,
tsv: str):
PlotOneTaxonBarplot(self.settings).main(
taxon_level=level,
taxon_table_tsv=tsv,
n_taxa=self.n_taxa,
dstdir=self.dstdir)
class PlotOneTaxonBarplot(Processor):
taxon_level: str
taxon_table_tsv: str
n_taxa: int
dstdir: str
df: pd.DataFrame
def main(
self,
taxon_level: str,
taxon_table_tsv: str,
n_taxa: int,
dstdir: str):
self.taxon_level = taxon_level
self.taxon_table_tsv = taxon_table_tsv
self.n_taxa = n_taxa
self.dstdir = dstdir
self.read_tsv()
self.pool_minor_taxa()
self.percentage_normalization()
self.save_tsv()
self.parcentage_barplot()
def read_tsv(self):
self.df = pd.read_csv(self.taxon_table_tsv, sep='\t', index_col=0)
def pool_minor_taxa(self):
self.df = PoolMinorFeatures(self.settings).main(
df=self.df,
n_major_features=self.n_taxa)
def percentage_normalization(self):
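        # normalize each sample (column) to percentages that sum to 100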
self.df = CountNormalization(self.settings).main(
df=self.df,
log_pseudocount=False,
by_sample_reads=True,
sample_reads_unit=100)
def save_tsv(self):
self.df.to_csv(f'{self.dstdir}/{self.taxon_level}-barplot.tsv', sep='\t')
def parcentage_barplot(self):
PercentageBarplot(self.settings).main(
data=self.df,
title=self.taxon_level,
output_png=f'{self.dstdir}/{self.taxon_level}-barplot.png'
)
class PoolMinorFeatures(Processor):
POOLED_FEATURE_NAME = 'Others'
ROW_SUM = 'Row Sum'
df: pd.DataFrame
n_major_features: int
def main(
self,
df: pd.DataFrame,
n_major_features: int) -> pd.DataFrame:
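        # keep the n_major_features most abundant rows and collapse the rest into a single 'Others' row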
self.df = df
self.n_major_features = n_major_features
self.sum_each_row()
self.sort_by_sum()
self.pool_minor_features()
self.clean_up()
return self.df
def sum_each_row(self):
self.df[self.ROW_SUM] = np.sum(self.df.to_numpy(), axis=1)
def sort_by_sum(self):
self.df = self.df.sort_values(
by=self.ROW_SUM,
ascending=False)
def pool_minor_features(self):
if len(self.df) <= self.n_major_features:
return
minor_feature_df = self.df.iloc[self.n_major_features:] # extract minor features
self.df = self.df.iloc[0:self.n_major_features] # remove from main df
self.df.loc[self.POOLED_FEATURE_NAME] = np.sum(minor_feature_df, axis=0) # sum -> new row
def clean_up(self):
self.df = self.df.drop(columns=self.ROW_SUM)
class PercentageBarplot(Processor):
DPI = 300
Y_LABEL = 'Percentage'
X_LABEL = 'Sample'
X_LABEL_CHAR_WIDTH = 0.08
LEGEND_CHAR_WIDTH = 0.08
BAR_WIDTH = 0.3
FEATURE_HEIGHT = 0.3
COLORS = [
'lavenderblush',
'midnightblue',
'royalblue',
'cornflowerblue',
'purple',
'palevioletred',
'mediumvioletred',
'moccasin',
'firebrick',
'silver',
'rebeccapurple',
'turquoise',
'yellow',
'crimson',
'orangered',
'darkgreen',
]
data: pd.DataFrame
title: str
output_png: str
figsize: Tuple[float, float]
figure: plt.Figure
def main(
self,
data: pd.DataFrame,
title: str,
output_png: str):
self.data = data
self.title = title
self.output_png = output_png
self.set_figsize()
self.init_figure()
self.plot()
self.config_and_save()
def set_figsize(self):
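        # figure width grows with the number of samples (bars), height with the number of features (legend rows), plus label paddings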
self.__set_paddings()
w = (len(self.data.columns) * self.BAR_WIDTH) + self.horizontal_padding
h = (len(self.data.index) * self.FEATURE_HEIGHT) + self.vertical_padding
self.figsize = (w, h)
def __set_paddings(self):
max_x_label_length = pd.Series(self.data.columns).apply(len).max()
self.vertical_padding = max_x_label_length * self.X_LABEL_CHAR_WIDTH
        max_legend_length = pd.Series(self.data.index).apply(len).max()
        self.horizontal_padding = max_legend_length * self.LEGEND_CHAR_WIDTH
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels in `list` converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)


@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
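    """Tests for ep.event_to_dataframe with the parents and child_first options."""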
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)


@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
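    """Tests for ep.epoch_to_dataframe with the parents and child_first options."""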
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
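        # Build the expected frame: labels truncated to the common length of
        # times/durations/labels, rows reordered by duration, and the sorted
        # durations/times used as the two levels of the target index.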
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)


@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
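    """Tests for ep.multi_spiketrains_to_dataframe with single spike trains,
    neo containers, and collections of Blocks."""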
def setUp(self):
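        # Python 2 compatibility: assertCountEqual was called assertItemsEqual
        # in the Python 2 unittest module.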
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
        assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
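        # Expected row count: the longest spike train, since column-wise
        # concatenation aligns the per-train frames on their spike_number index.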
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)


@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
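    """Tests for ep.multi_events_to_dataframe with single events,
    neo containers, and collections of Blocks."""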
def setUp(self):
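        # Python 2 compatibility: assertCountEqual was called assertItemsEqual
        # in the Python 2 unittest module.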
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
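        # Expected row count: the number of distinct event times across all
        # events, since the per-event frames are concatenated column-wise and
        # aligned on their time index.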
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEpochsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_epochs_to_dataframe__single(self):
obj = fake_neo('Epoch', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False)
res2 = ep.multi_epochs_to_dataframe(obj, parents=True)
res3 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res4 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res7 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.epoch_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.durations), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
| assert_frame_equal(targ, res7) | pandas.util.testing.assert_frame_equal |
import sys
sys.path.append('../../')
import numpy as np
import pandas as pd
from tqdm import trange
_cache_path = '../src/d04_modeling/cache/'
_default_fname = 'value_function.pkl'
class KnapsackApprox:
"""
This algorithm finds a subset of items whose total weight does
    not exceed W, with total value at most a (1+`eps`) factor below the maximum possible. This algorithm uses
    a dynamic programming algorithm for the case when the values are small, even if the weights may be big. This
    algorithm has running time O(n^2 * v_max) with v_max the maximum value. This means that if the range of values is
    small (v_max is small) the approximation is not necessary. This implementation is based on the content from
Chapter 11 Section 11.8 of Algorithm Design by <NAME> and <NAME>.
"""
def __init__(self, eps, data: pd.DataFrame, value_col, weight_col, scale=True):
"""
Implementation of the algorithm Knapsack-Approx.
        :param eps: approximation factor for the approximate solution
:param data: data indexed by the names of the items (i.e. block or group identifiers)
:param value_col: column to be used as values for the items
:param weight_col: column to be used as weights for the items
:param scale: boolean to use approximation if values range is large
"""
data.sort_values(value_col, inplace=True)
self.value_col = value_col
self.weight_col = weight_col
self.v = data[value_col].rename('v').to_frame() # values of each item
self.w = data[weight_col] # weights of each item
n = len(data)
self.n = n
if scale:
v_max = self.v['v'].max()
b = (eps / (2 * n)) * v_max
print("Using scaling parameter b = %.4f" % b)
self.v['v_hat'] = np.ceil(self.v['v'].values / b).astype('int64')
else:
# TODO: Any considerations if I don't scale the parameters and still apply the ceil function?
# TODO: What is the approximation error in this case? (1 + eps) factor below the maximum possible.
# TODO: What if the values are already small and integer?
            # TODO: How much can we improve by filtering out certain blocks beforehand?
self.v['v_hat'] = np.ceil(self.v['v']).astype('int64')
self.v['cumsum'] = self.v['v_hat'].cumsum()
self.value_function = np.zeros((n, self.v['cumsum'].iloc[-1]+1)) + np.nan
def solve(self):
"""
Use dynamic programming to compute the value function
:return:
"""
self.value_function[:, 0] = 0
for i in trange(self.n):
v_range = np.arange(1, self.v['cumsum'].iloc[i]+1)
w_i = self.w.iloc[i]
v_i = self.v['v_hat'].iloc[i]
for v in v_range:
if i == 0:
self.value_function[i, v] = w_i
continue
if v > self.v['cumsum'].iloc[i-1]:
self.value_function[i, v] = w_i + self.get_values(i-1, v-v_i)
else:
w1 = self.get_values(i-1, v)
w2 = w_i + self.get_values(i-1, max(0, v-v_i))
self.value_function[i, v] = min(w1, w2)
return None
def get_values(self, i, v):
"""
        Query value function (weight) for a given item `i` and value `v`. In this case the value function is the
minimum total weight of the combination of items 1..i that has total value `v`.
:param i: numerical index of item
:param v: accumulated value
:return:
"""
if i < 0:
return 0.
w = self.value_function[i, v]
if np.isfinite(w):
return w
else:
return 0.
def get_solution(self, w_max):
"""
Obtain solution set from max weight constraint.
:param w_max: maximum weight constraint
:return: maximum value and list of (real) index of items in the optimal subset
"""
solution_set = []
v_opt = np.max(np.argwhere(self.value_function[-1][:] <= w_max).flatten())
v = v_opt
i = self.value_function.shape[0]-1
while v > 0:
if i < 0:
raise Exception("Something is of with the saved value function... Try computing it again!")
w_i = self.w.iloc[i]
v_i = self.v['v_hat'].iloc[i]
w1 = self.get_values(i, v)
w2 = w_i + self.get_values(i-1, v-v_i)
if w1 == w2:
solution_set += [self.v.index[i]]
v -= v_i
i -= 1
return v_opt, solution_set
def get_value_per_weight(self):
"""
Query maximum values obtained for different maximum weights.
:return: pandas.DataFrame
"""
solution_weights = self.value_function[-1][:]
results = pd.Series(solution_weights, name='weights')
results.index.name = 'values'
results = results.to_frame().reset_index()
return results.groupby('weights').max()
def save_value_function(self, fname=None):
"""
Save value function solution
:param fname: file name
:return:
"""
if fname is None:
pd.DataFrame(self.value_function).to_pickle(_cache_path + _default_fname)
else:
pd.DataFrame(self.value_function).to_pickle(_cache_path + fname)
def load_value_function(self, fname=None):
"""
Load value function solution
:param fname: file name
:return:
"""
if fname is None:
df = pd.read_pickle(_cache_path + _default_fname)
else:
df = pd.read_pickle(_cache_path + fname)
self.value_function = df.values
if __name__ == "__main__":
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [12, 8]
plt.rcParams['figure.dpi'] = 100 # 200 e.g. is really fine, but slower
data = pd.read_csv("../../src/d01_data/knapsack_data.csv")
mask = data['focalRate'] > 0.0
solver = KnapsackApprox(eps=.5, data=data.loc[mask].copy(),
value_col='nFocalStudents',
weight_col='nOtherStudents',
scale=False)
solver.solve()
plt.imshow(pd.DataFrame(solver.value_function).fillna(0))
plt.show()
total_students = (data['nFocalStudents'].values + data['nOtherStudents'].values).sum()
fp_rate = 0.1
w_max = fp_rate * total_students
v_opt, solution_set = solver.get_solution(w_max=w_max)
solution_set = | pd.Index(solution_set, name=data.index.name) | pandas.Index |
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2021.10.18
#
# title: Unpacking pands read_HTML dataframe
# url: https://stackoverflow.com/questions/69608885/unpacking-pands-read-html-dataframe/69610319#69610319
# [Unpacking pands read_HTML dataframe](https://stackoverflow.com/questions/69608885/unpacking-pands-read-html-dataframe/69610319#69610319)
import selenium.webdriver
import pandas as pd
driver = selenium.webdriver.Firefox()
driver.get('https://www.oddsportal.com/american-football/usa/nfl')
# ---
all_results = []
date = None
all_rows = driver.find_elements_by_xpath('//table[@id="tournamentTable"]//tr')
for row in all_rows:
classes = row.get_attribute('class')
print('classes:', classes)
if classes == 'center nob-border':
date = row.find_element_by_tag_name('span').text.strip()
print('date:', date)
elif (classes == 'table-dummyrow') or ('hidden' in classes):
pass # skip empty rows
else:
if date:
all_cells = row.find_elements_by_xpath('.//td')
print('len(all_cells):', len(all_cells))
teams = all_cells[1].text.split(' - ')
if len(all_cells) == 5:
# row without score
row_values = [
date,
all_cells[0].text.strip(),
teams[0].strip(),
teams[1].strip(),
'',
all_cells[2].text.strip(),
all_cells[3].text.strip(),
all_cells[4].text.strip(),
]
else:
# row with score
row_values = [
date,
all_cells[0].text.strip(),
teams[0].strip(),
teams[1].strip(),
all_cells[2].text.strip(),
all_cells[3].text.strip(),
all_cells[4].text.strip(),
all_cells[5].text.strip(),
]
print('row:', row_values)
all_results.append(row_values)
print('-----------------------')
df = | pd.DataFrame(all_results, columns=['date', 'game_time', 'Team1', 'Team2', 'Score', '1', '2', 'B']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import annotations
import itertools
import collections
from abc import ABC, abstractmethod
from typing import (cast,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
TypeVar,
Union,
TYPE_CHECKING)
from functools import wraps
from dataclasses import dataclass, field
import numpy as np
import pandas as pd # type: ignore
import xarray as xr
from scipy import interpolate # type: ignore
from .base import _TimeStepResolver
from .edges import _map_to_edges_geoframe
from ..cases import CaseStudy
from ..types import Num, StrOrPath
from .._docs import docstringtemplate
if TYPE_CHECKING: # pragma: no cover
import numpy.typing as npt
# Generic for decorators
F = TypeVar('F', bound=Callable[..., Any])
def _extract(func: F) -> F:
@wraps(func)
def wrapper(self, t_step: int,
value: Num,
x: Optional[Sequence[Num]] = None,
y: Optional[Sequence[Num]] = None) -> xr.Dataset:
do_interp = sum((bool(x is not None),
bool(y is not None)))
if do_interp == 1:
raise RuntimeError("x and y must both be set")
t_step = self._resolve_t_step(t_step)
if t_step not in self._t_steps:
self._load_t_step(t_step)
ds = func(self, t_step, value, x, y)
if not do_interp: return ds
return ds.interp({"$x$": xr.DataArray(x),
"$y$": xr.DataArray(y)})
return cast(F, wrapper)
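# Example of how the decorated extraction methods behave (illustrative call, matching the
# doctests further down): result.faces.extract_z(-1, -1, x=[6, 7], y=[3, 3]) loads the
# requested time step on demand and interpolates onto the given points, while passing only
# x (or only y) raises RuntimeError("x and y must both be set").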
@dataclass
class _FacesDataClassMixin(_TimeStepResolver):
xmax: Num #: maximum range in x-direction, in metres
_t_steps: Dict[int, pd.Timestamp] = field(default_factory=dict,
init=False,
repr=False)
_frame: Optional[pd.DataFrame] = field(default=None,
init=False,
repr=False)
class Faces(ABC, _FacesDataClassMixin):
"""Class for extracting results on the faces of the simulation grid. Use in
conjunction with the :class:`.Result` class.
>>> from snl_d3d_cec_verify import Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> result.faces.extract_z(-1, -1) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ($x$: 18, $y$: 4)
Coordinates:
* $x$ ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
* $y$ ($y$) float64 1.5 2.5 3.5 4.5
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
Data variables:
$\\sigma$ ($x$, $y$) float64 -0.4994 -0.4994 -0.4994 ... -0.5 -0.5 -0.5
$u$ ($x$, $y$) float64 0.781 0.781 0.781 ... 0.7763 0.7763 0.7763
$v$ ($x$, $y$) float64 -3.237e-18 1.423e-17 ... -8.598e-17 -4.824e-17
$w$ ($x$, $y$) float64 -0.01472 -0.01472 ... 0.001343 0.001343
$k$ ($x$, $y$) float64 0.004802 0.004765 ... 0.003674 0.0036...
:param nc_path: path to the ``.nc`` file containing results
:param n_steps: number of time steps in the simulation
:param xmax: maximum range in x-direction, in metres
"""
@docstringtemplate
def extract_turbine_centre(self, t_step: int,
case: CaseStudy,
offset_x: Num = 0,
offset_y: Num = 0,
offset_z: Num = 0) -> xr.Dataset:
"""Extract data at the turbine centre, as defined in the given
:class:`.CaseStudy` object. Available data is:
* :code:`sigma`: sigma layer
* :code:`u`: velocity in the x-direction, in metres per second
        * :code:`v`: velocity in the y-direction, in metres per second
        * :code:`w`: velocity in the z-direction, in metres per second
* :code:`k`: turbulent kinetic energy, in metres squared per second
squared
Results are returned as a :class:`xarray.Dataset`. For example:
>>> from snl_d3d_cec_verify import MycekStudy, Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> case = MycekStudy()
>>> result.faces.extract_turbine_centre(-1, case) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: (dim_0: 1)
Coordinates:
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
$x$ (dim_0) ... 6
$y$ (dim_0) ... 3
Dimensions without coordinates: dim_0
Data variables:
$\\sigma$ (dim_0) float64 -0.4996
$u$ (dim_0) float64 0.7748
$v$ (dim_0) float64 -2.942e-17
$w$ (dim_0) float64 0.0002786
$k$ (dim_0) float64 0.004...
The position extracted can also be shifted using the ``offset_x``,
``offset_y`` and ``offset_z`` parameters.
:param t_step: Time step index
:param case: Case study from which to get turbine position
:param offset_x: Shift x-coordinate of extraction point, in metres.
Defaults to {offset_x}
:param offset_y: Shift y-coordinate of extraction point, in metres.
Defaults to {offset_y}
:param offset_z: Shift z-coordinate of extraction point, in metres.
Defaults to {offset_z}
:raises IndexError: if the time-step index (``t_step``) is out of
range
:raises ValueError: if the length of the :class:`.CaseStudy` object is
greater than one
:rtype: xarray.Dataset
"""
_check_case_study(case)
# Inform the type checker that we have Num for single value cases
turb_pos_z = cast(Num, case.turb_pos_z)
turb_pos_x = cast(Num, case.turb_pos_x)
turb_pos_y = cast(Num, case.turb_pos_y)
return self.extract_z(t_step,
turb_pos_z + offset_z,
[turb_pos_x + offset_x],
[turb_pos_y + offset_y])
@docstringtemplate
def extract_turbine_centreline(self, t_step: int,
case: CaseStudy,
x_step: Num = 0.5,
offset_x: Num = 0,
offset_y: Num = 0,
offset_z: Num = 0) -> xr.Dataset:
"""Extract data along the turbine centreline, from the turbine
position defined in the given :class:`.CaseStudy` object. Available
data is:
        * :code:`sigma`: sigma layer
        * :code:`u`: velocity in the x-direction, in metres per second
        * :code:`v`: velocity in the y-direction, in metres per second
        * :code:`w`: velocity in the z-direction, in metres per second
* :code:`k`: turbulent kinetic energy, in metres squared per second
squared
Results are returned as a :class:`xarray.Dataset`. Use the ``x_step``
argument to control the frequency of samples. For example:
>>> from snl_d3d_cec_verify import MycekStudy, Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> case = MycekStudy()
>>> result.faces.extract_turbine_centreline(-1, case, x_step=1) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: (dim_0: 13)
Coordinates:
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
$x$ (dim_0) float64 6.0 7.0 8.0 9.0 10.0 ... 14.0 15.0 16.0 17.0 18.0
$y$ (dim_0) ... 3 3 3 3 3 3 3 3 3 3 3 3 3
Dimensions without coordinates: dim_0
Data variables:
$\\sigma$ (dim_0) float64 -0.4996 -0.4996 -0.4996 ... -0.4999 -0.4999 nan
$u$ (dim_0) float64 0.7748 0.7747 0.7745 0.7745 ... 0.7759 0.7762 nan
$v$ (dim_0) float64 -2.942e-17 4.192e-17 9.126e-17 ... -8.523e-17 nan
$w$ (dim_0) float64 0.0002786 -0.0004764 0.0003097 ... -7.294e-05 nan
$k$ (dim_0) float64 0.004307 0.004229 0.004157 ... 0.003691 nan
The position extracted can also be shifted using the ``offset_x``,
``offset_y`` and ``offset_z`` parameters.
:param t_step: Time step index
:param case: Case study from which to get turbine position
:param x_step: Sample step, in metres. Defaults to {x_step}
:param offset_x: Shift x-coordinate of extraction point, in metres.
Defaults to {offset_x}
:param offset_y: Shift y-coordinate of extraction point, in metres.
Defaults to {offset_y}
:param offset_z: Shift z-coordinate of extraction point, in metres.
Defaults to {offset_z}
:raises IndexError: if the time-step index (``t_step``) is out of
range
:raises ValueError: if the length of the :class:`.CaseStudy` object is
greater than one
:rtype: xarray.Dataset
"""
_check_case_study(case)
# Inform the type checker that we have Num for single value cases
turb_pos_z = cast(Num, case.turb_pos_z)
turb_pos_x = cast(Num, case.turb_pos_x)
turb_pos_y = cast(Num, case.turb_pos_y)
x = np.arange(turb_pos_x + offset_x, self.xmax, x_step)
if np.isclose(x[-1] + x_step, self.xmax): x = np.append(x, self.xmax)
y = [turb_pos_y + offset_y] * len(x)
return self.extract_z(t_step, turb_pos_z + offset_z, list(x), y)
def extract_turbine_z(self, t_step: int,
case: CaseStudy,
offset_z: Num = 0) -> xr.Dataset:
"""Extract data from the z-plane interseting the turbine centre, as
defined in the given :class:`.CaseStudy` object, at the face centres.
Available data is:
        * :code:`sigma`: sigma layer
        * :code:`u`: velocity in the x-direction, in metres per second
        * :code:`v`: velocity in the y-direction, in metres per second
        * :code:`w`: velocity in the z-direction, in metres per second
* :code:`k`: turbulent kinetic energy, in metres squared per second
squared
        Results are returned as a :class:`xarray.Dataset`. For example:
>>> from snl_d3d_cec_verify import MycekStudy, Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> case = MycekStudy()
>>> result.faces.extract_turbine_z(-1, case) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ($x$: 18, $y$: 4)
Coordinates:
* $x$ ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
* $y$ ($y$) float64 1.5 2.5 3.5 4.5
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
Data variables:
$\\sigma$ ($x$, $y$) float64 -0.4994 -0.4994 -0.4994 ... -0.5 -0.5 -0.5
$u$ ($x$, $y$) float64 0.781 0.781 0.781 ... 0.7763 0.7763 0.7763
$v$ ($x$, $y$) float64 -3.237e-18 1.423e-17 ... -8.598e-17 -4.824e-17
$w$ ($x$, $y$) float64 -0.01472 -0.01472 ... 0.001343 0.001343
$k$ ($x$, $y$) float64 0.004802 0.004765 ... 0.003674 0.0036...
The z-plane can be shifted using the ``offset_z`` parameter.
:param t_step: Time step index
:param case: Case study from which to get turbine position
:param offset_z: Shift z-coordinate of extraction point, in metres.
Defaults to {offset_z}
:raises IndexError: if the time-step index (``t_step``) is out of
range
:raises ValueError: if the length of the :class:`.CaseStudy` object is
greater than one
:rtype: xarray.Dataset
"""
_check_case_study(case)
turb_pos_z = cast(Num, case.turb_pos_z)
return self.extract_z(t_step, turb_pos_z + offset_z)
@_extract
def extract_z(self, t_step: int,
z: Num,
x: Optional[Sequence[Num]] = None,
y: Optional[Sequence[Num]] = None) -> xr.Dataset:
"""Extract data on the plane at the given z-level. Available data is:
* :code:`sigma`: sigma value
* :code:`u`: velocity in the x-direction, in metres per second
        * :code:`v`: velocity in the y-direction, in metres per second
        * :code:`w`: velocity in the z-direction, in metres per second
* :code:`k`: turbulent kinetic energy, in metres squared per second
squared
Results are returned as a :class:`xarray.Dataset`. If the ``x`` and
``y`` parameters are defined, then the results are interpolated onto
the given coordinates. For example:
>>> from snl_d3d_cec_verify import Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> x = [6, 7, 8, 9, 10]
>>> y = [2, 2, 2, 2, 2]
>>> result.faces.extract_z(-1, -1, x, y) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: (dim_0: 5)
Coordinates:
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
$x$ (dim_0) ... 6 7 8 9 10
$y$ (dim_0) ... 2 2 2 2 2
Dimensions without coordinates: dim_0
Data variables:
$\\sigma$ (dim_0) float64 -0.4996 -0.4996 -0.4996 -0.4997 -0.4997
$u$ (dim_0) float64 0.7748 0.7747 0.7745 0.7745 0.7746
$v$ (dim_0) float64 -3.877e-18 4.267e-17 5.452e-17 5.001e-17 8.011e-17
$w$ (dim_0) float64 0.0002786 -0.0004764 ... -0.0002754 0.0003252
$k$ (dim_0) float64 0.004317 0.0042... 0.00416... 0.00409... 0.00403...
If ``x`` and ``y`` are not given, then the results are returned at the
face centres.
>>> result.faces.extract_z(-1, -1) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ($x$: 18, $y$: 4)
Coordinates:
* $x$ ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
* $y$ ($y$) float64 1.5 2.5 3.5 4.5
$z$ ... -1
time datetime64[ns] 2001-01-01T01:00:00
Data variables:
$\\sigma$ ($x$, $y$) float64 -0.4994 -0.4994 -0.4994 ... -0.5 -0.5 -0.5
$u$ ($x$, $y$) float64 0.781 0.781 0.781 ... 0.7763 0.7763 0.7763
$v$ ($x$, $y$) float64 -3.237e-18 1.423e-17 ... -8.598e-17 -4.824e-17
$w$ ($x$, $y$) float64 -0.01472 -0.01472 ... 0.001343 0.001343
$k$ ($x$, $y$) float64 0.004802 0.004765 ... 0.003674 0.0036...
:param t_step: Time step index
:param z: z-level at which to extract data
:param x: x-coordinates on which to interpolate data
:param y: y-coordinates on which to interpolate data
:raises IndexError: if the time-step index (``t_step``) is out of
range
:raises RuntimeError: if only ``x`` or ``y`` is set
:rtype: xarray.Dataset
"""
return _faces_frame_to_slice(self._frame,
self._t_steps[t_step],
"z",
z)
@_extract
def extract_sigma(self, t_step: int,
sigma: float,
x: Optional[Sequence[Num]] = None,
y: Optional[Sequence[Num]] = None) -> xr.Dataset:
"""Extract data on the plane at the given sigma-level. Available
data is:
* :code:`z`: the z-level, in metres
* :code:`u`: velocity in the x-direction, in metres per second
        * :code:`v`: velocity in the y-direction, in metres per second
        * :code:`w`: velocity in the z-direction, in metres per second
* :code:`k`: turbulent kinetic energy, in metres squared per second
squared
Results are returned as a :class:`xarray.Dataset`. If the ``x`` and
``y`` parameters are defined, then the results are interpolated onto
the given coordinates. For example:
>>> from snl_d3d_cec_verify import Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> x = [6, 7, 8, 9, 10]
>>> y = [2, 2, 2, 2, 2]
>>> result.faces.extract_sigma(-1, -0.5, x, y) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: (dim_0: 5)
Coordinates:
$\\sigma$ ... -0.5
time datetime64[ns] 2001-01-01T01:00:00
$x$ (dim_0) ... 6 7 8 9 10
$y$ (dim_0) ... 2 2 2 2 2
Dimensions without coordinates: dim_0
Data variables:
$z$ (dim_0) float64 -1.001 -1.001 -1.001 -1.001 -1.001
$u$ (dim_0) float64 0.7747 0.7746 0.7744 0.7745 0.7745
$v$ (dim_0) float64 -3.88e-18 4.267e-17 5.452e-17 5.002e-17 8.013e-17
$w$ (dim_0) float64 0.0002791 -0.0004769 ... -0.0002756 0.0003256
$k$ (dim_0) float64 0.004... 0.0042... 0.0041... 0.004... 0.0040...
If ``x`` and ``y`` are not given, then the results are returned at the
face centres.
>>> result.faces.extract_sigma(-1, -0.5) #doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ($x$: 18, $y$: 4)
Coordinates:
* $x$ ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
* $y$ ($y$) float64 1.5 2.5 3.5 4.5
$\\sigma$ ... -0.5
time datetime64[ns] 2001-01-01T01:00:00
Data variables:
$z$ ($x$, $y$) float64 -1.001 -1.001 -1.001 -1.001 ... -1.0 -1.0 -1.0
$u$ ($x$, $y$) float64 0.7809 0.7809 0.7809 ... 0.7763 0.7763 0.7763
$v$ ($x$, $y$) float64 -3.29e-18 1.419e-17 ... -8.598e-17 -4.824e-17
$w$ ($x$, $y$) float64 -0.01473 -0.01473 ... 0.001343 0.001343
$k$ ($x$, $y$) float64 0.004809 0.004772 ... 0.003674 0.0036...
:param t_step: Time step index
:param sigma: sigma-level at which to extract data
:param x: x-coordinates on which to interpolate data
:param y: y-coordinates on which to interpolate data
:raises IndexError: if the time-step index (``t_step``) is out of
range
:raises RuntimeError: if only ``x`` or ``y`` is set
:rtype: xarray.Dataset
"""
return _faces_frame_to_slice(self._frame,
self._t_steps[t_step],
"sigma",
sigma)
def extract_depth(self, t_step: int) -> xr.DataArray:
"""Extract the depth, in meters, at each of the face centres.
Results are returned as a :class:`xarray.DataArray`. For example:
>>> from snl_d3d_cec_verify import Result
>>> data_dir = getfixture('data_dir')
>>> result = Result(data_dir)
>>> result.faces.extract_depth(-1)
<xarray.DataArray 'depth' ($x$: 18, $y$: 4)>
array([[2.00234445, 2.00234445, 2.00234445, 2.00234445],
[2.00224624, 2.00224624, 2.00224624, 2.00224624],
[2.00212823, 2.00212823, 2.00212823, 2.00212823],
[2.00201275, 2.00201275, 2.00201275, 2.00201275],
[2.00188605, 2.00188605, 2.00188605, 2.00188605],
[2.00176218, 2.00176218, 2.00176218, 2.00176218],
[2.00163089, 2.00163089, 2.00163089, 2.00163089],
[2.00150178, 2.00150178, 2.00150178, 2.00150178],
[2.0013675 , 2.0013675 , 2.0013675 , 2.0013675 ],
[2.00123502, 2.00123502, 2.00123502, 2.00123502],
[2.00109849, 2.00109849, 2.00109849, 2.00109849],
[2.00096352, 2.00096352, 2.00096352, 2.00096352],
[2.0008259 , 2.0008259 , 2.0008259 , 2.0008259 ],
[2.00068962, 2.00068962, 2.00068962, 2.00068962],
[2.0005524 , 2.0005524 , 2.0005524 , 2.0005524 ],
[2.00041653, 2.00041653, 2.00041653, 2.00041653],
[2.00027887, 2.00027887, 2.00027887, 2.00027887],
[2.00014281, 2.00014281, 2.00014281, 2.00014281]])
Coordinates:
* $x$ ($x$) float64 0.5 1.5 2.5 3.5 4.5 5.5 ... 13.5 14.5 15.5 16.5 17.5
* $y$ ($y$) float64 1.5 2.5 3.5 4.5
time datetime64[ns] 2001-01-01T01:00:00
:param t_step: Time step index
:raises IndexError: if the time-step index (``t_step``) is out of
range
:rtype: xarray.DataArray
"""
t_step = self._resolve_t_step(t_step)
if t_step not in self._t_steps:
self._load_t_step(t_step)
return _faces_frame_to_depth(self._frame,
self._t_steps[t_step])
def _load_t_step(self, t_step: int):
t_step = self._resolve_t_step(t_step)
if t_step in self._t_steps: return
frame = self._get_faces_frame(t_step)
if self._frame is None:
self._frame = frame
else:
self._frame = pd.concat([self._frame, frame],
ignore_index=True)
self._t_steps[t_step] = pd.Timestamp(frame["time"].unique().take(0))
@abstractmethod
def _get_faces_frame(self, t_step: int) -> pd.DataFrame:
pass # pragma: no cover
def _check_case_study(case: CaseStudy):
if len(case) != 1:
raise ValueError("case study must have length one")
def _faces_frame_to_slice(frame: pd.DataFrame,
sim_time: pd.Timestamp,
key: str,
value: Num) -> xr.Dataset:
valid_keys = ['z', 'sigma']
if key not in valid_keys:
keys_msg = ", ".join(valid_keys)
err_msg = f"Given key is not valid. Choose from {keys_msg}"
raise RuntimeError(err_msg)
valid_keys.remove(key)
other_key = valid_keys[0]
frame = frame.set_index(['x', 'y', 'time'])
frame = frame.xs(sim_time, level=2)
data = collections.defaultdict(list)
remove_nans = lambda a: a[:, ~np.isnan(a).any(axis=0)]
for (x, y), group in frame.groupby(level=[0, 1]):
cols = ["z", "sigma", "u", "v", "w"]
if "tke" in group: cols.append("tke")
group = group.reset_index(drop=True)
group_values = group[cols].to_numpy().T
zsig = group_values[:2, :]
zsig = remove_nans(zsig)
if key == "z":
get_sigma = interpolate.interp1d(zsig[0, :],
zsig[1, :],
fill_value="extrapolate")
sigma = float(get_sigma(value))
other = sigma
else:
get_z = interpolate.interp1d(zsig[1, :],
zsig[0, :],
fill_value="extrapolate")
other = float(get_z(value))
sigma = value
sigvel = group_values[1:5, :]
sigvel = remove_nans(sigvel)
get_vel = interpolate.interp1d(sigvel[0, :],
sigvel[1:, :],
fill_value="extrapolate")
vel = get_vel(sigma)
if "tke" in group:
sigtke = group_values[[1, 5], :]
sigtke = remove_nans(sigtke)
get_tke = interpolate.interp1d(sigtke[0, :],
sigtke[1:, :],
fill_value="extrapolate")
tke = get_tke(sigma)
data["x"].append(x)
data["y"].append(y)
data[other_key].append(other)
data["u"].append(vel[0])
data["v"].append(vel[1])
data["w"].append(vel[2])
if "tke" in group:
data["tke"].append(tke[0])
zframe = pd.DataFrame(data)
zframe = zframe.set_index(['x', 'y'])
ds = zframe.to_xarray()
ds = ds.assign_coords({key: value})
ds = ds.assign_coords({"time": sim_time})
name_map = {"z": "$z$",
"x": "$x$",
"y": "$y$",
"u": "$u$",
"v": "$v$",
"w": "$w$",
"sigma": r"$\sigma$"}
if "tke" in data: name_map["tke"] = "$k$"
ds = ds.rename(name_map)
return ds
def _faces_frame_to_depth(frame: pd.DataFrame,
sim_time: pd.Timestamp) -> xr.DataArray:
frame = frame[['x', 'y', 'sigma', 'time', 'depth']]
frame = frame.dropna()
sigma = frame["sigma"].unique().take(0)
frame = frame.set_index(['x', 'y', 'sigma', 'time'])
frame = frame.xs((sigma, sim_time), level=(2, 3))
ds = frame.to_xarray()
ds = ds.assign_coords({"time": sim_time})
ds = ds.rename({"x": "$x$",
"y": "$y$"})
return ds.depth
class _FMFaces(Faces):
def _get_faces_frame(self, t_step: int) -> pd.DataFrame:
return _map_to_faces_frame_with_tke(self.nc_path, t_step)
def _map_to_faces_frame_with_tke(map_path: StrOrPath,
t_step: int = None) -> pd.DataFrame:
faces = _map_to_faces_frame(map_path, t_step)
edges = _map_to_edges_geoframe(map_path, t_step)
times = faces["time"].unique()
facesi = faces.set_index("time")
edgesi = edges.set_index("time")
faces_final = pd.DataFrame()
for time in times:
facest = facesi.loc[time]
edgest = edgesi.loc[time]
facest = facest.reset_index(drop=True)
edgest = edgest.reset_index(drop=True)
edgest["x"] = edgest['geometry'].apply(
lambda line: np.array(line.centroid.coords[0])[0])
edgest["y"] = edgest['geometry'].apply(
lambda line: np.array(line.centroid.coords[0])[1])
edgesdf = pd.DataFrame(edgest[["x",
"y",
"sigma",
"turkin1",
"f0",
"f1"]])
facest = facest.set_index(["x", "y", "sigma"])
facest = facest.sort_index()
x = facest.index.get_level_values(0).unique().values
y = facest.index.get_level_values(1).unique().values
grid_x, grid_y = np.meshgrid(x, y)
facest_new = facest.copy()
for sigma, group in edgesdf.groupby(by="sigma"):
# Fill missing values
groupna = group[ | pd.isna(group["turkin1"]) | pandas.isna |
import gc
import glob
import os.path
import numpy as np
import pandas as pd
import torch
from sentence_transformers import SentenceTransformer, util
from torch import nn
from bin.inference.chunks import chunks
from bin.transformers.concat_regression import ConcatRegression
from bin.file_utils import rm_and_new_folder
from bin.random.seed_everything import seed_everything
from bin.checkpoints.upload_to_kaggle import (
kaggle_new_dataset_version,
kaggle_get_metadata,
)
from bin.checkpoints.wandb_download_chekpoints import download
class Model(nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, text, device):
return self.model(text, device)
def get_corpora(min_len=21, max_len=5000):
jigsaw_bias = pd.read_csv(
"data/jigsaw-unintended-bias-in-toxicity-classification/train.csv"
)
toxic_task = | pd.read_csv("data/toxictask/task_a_distant.tsv", sep="\t") | pandas.read_csv |
"""
This file contains auxiliary functions used to update the data
day by day.
"""
from datetime import datetime, timedelta
import os
import time
import logging
from alpha_vantage.techindicators import TechIndicators
import pandas as pd
import numpy as np
import pandas_datareader.data as web
import constants as c
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def get_symbols():
"""
    Reads the ticker symbols from the aux_symbols file.
    Return:
    -------
    np.array:
        Array with the loaded symbols
"""
df = pd.read_csv(f"{c.PATH_SYMBOL}aux_symbols.csv", names=['description', 'symbol'])
return df['symbol'].values
def failed_symbols(symbol):
"""
    Saves to a file the ticker symbols that have failed.
    Parameters:
    ----------
    symbol str:
        Name of the stock ticker.
"""
df = pd.DataFrame({'symbol': [symbol]})
if os.path.isfile(f"{c.PATH_SYMBOL}failed_symbols.csv"):
df.to_csv(f"{c.PATH_SYMBOL}failed_symbols.csv", index=False, header=False, mode='a')
else:
df.to_csv(f"{c.PATH_SYMBOL}failed_symbols.csv", index=False, header=False)
def get_daily(symbol, end=None, is_save=False):
"""
    This function downloads the daily price data for a symbol.
    If we use a free ALPHA VANTAGE API_KEY we have to
    uncomment the time.sleep() calls in the function so that we do not
    run into a request-rate problem; with a premium API_KEY there is
    no need to uncomment them.
    Parameters:
    ----------
    symbol str:
        Name of the stock ticker.
    end str or None:
        Decides whether to download all data up to today or up to a
        given date. Defaults to None, which means data is downloaded
        up to today.
    is_save bool:
        Boolean that decides whether or not to save the downloaded
        data. Defaults to False, which does not save the downloaded
        data.
"""
    # If the file exists, take the last date of data in order to update it;
    # otherwise start a new file
if os.path.isfile(f"{c.PATH_SAVE_CLOSE}{symbol}.csv"):
df = pd.read_csv(f"{c.PATH_SAVE_CLOSE}{symbol}.csv", names=c.COLUMNS)
df = df[df['symbol'] == symbol]
df['date'] = pd.to_datetime(df['date'])
        ld = df['date'].iloc[-1]
        start = datetime(ld.year, ld.month, ld.day) + timedelta(days=1)
else:
start = datetime(2006, 1, 1)
    # Use today as the last date of data, or a date given as a
    # parameter
if end is None:
end = datetime.today()
else:
pass
    # Get the daily closing prices from the start date up to the
    # end date
data = web.DataReader(symbol, "av-daily", start=start, end=end, api_key=c.ALPHA_VAN_KEY)
data.reset_index(inplace=True)
data['symbol'] = symbol
data.rename(columns={'index': 'date'}, inplace=True)
data['date'] = pd.to_datetime(data['date'])
    # Save the results if requested
if is_save:
data[c.COLUMNS].to_csv(f"{c.PATH_SAVE_CLOSE}{symbol}.csv", mode='a', index=False, header=False)
def get_technical(symbol, is_save=False):
"""
    This function downloads all the technical indicators.
    If we use a free ALPHA VANTAGE API_KEY we have to
    uncomment the time.sleep() calls in the function so that we do not
    run into a request-rate problem; with a premium API_KEY there is
    no need to uncomment them.
    Parameters:
    ----------
    symbol str:
        Name of the stock ticker.
    is_save bool:
        Boolean that decides whether or not to save the downloaded
        data. Defaults to False, which does not save the downloaded
        data.
"""
try:
        # Check whether the file already exists; if it does, save only
        # the days that have not been downloaded yet
if os.path.isfile(f"{c.PATH_SAVE_TECH}{symbol}.csv"):
df = pd.read_csv(f"{c.PATH_SAVE_TECH}{symbol}.csv", names=c.COLUMNS)
df = df[df['symbol'] == symbol]
df['date'] = pd.to_datetime(df['date'])
            ld = df['date'].iloc[-1]
            last = datetime(ld.year, ld.month, ld.day)
else:
last = None
techindc = list()
        # Download the technical indicator data.
ti = TechIndicators(key=c.ALPHA_VAN_KEY, output_format='pandas')
init = time.time()
macd = ti.get_macd(symbol, interval='daily')[0]
techindc.append(macd)
stoch = ti.get_stoch(symbol, interval='daily')[0]
techindc.append(stoch)
ad = ti.get_ad(symbol, interval='daily')[0]
techindc.append(ad)
obv = ti.get_obv(symbol, interval='daily')[0]
techindc.append(obv)
# time.sleep(60)
sma = ti.get_sma(symbol, interval='daily', time_period=50)[0]
sma.columns = [f"{c}50"for c in sma.columns]
techindc.append(sma)
bbands = ti.get_bbands(symbol, interval='daily', time_period=28)[0]
bbands.columns = [f"{c}28"for c in bbands.columns]
techindc.append(bbands)
for tp in c.TIME_PERIOD:
rsi = ti.get_rsi(symbol, interval='daily', time_period=tp)[0]
rsi.columns = [f"{c}{tp}"for c in rsi.columns]
techindc.append(rsi)
adx = ti.get_adx(symbol, interval='daily', time_period=tp)[0]
adx.columns = [f"{c}{tp}"for c in adx.columns]
techindc.append(adx)
# time.sleep(60)
cci = ti.get_cci(symbol, interval='daily', time_period=tp)[0]
cci.columns = [f"{c}{tp}"for c in cci.columns]
techindc.append(cci)
aroon = ti.get_aroon(symbol, interval='daily', time_period=tp)[0]
aroon.columns = [f"{c}{tp}"for c in aroon.columns]
techindc.append(aroon)
df_techindc = | pd.concat(techindc, axis=1, join='inner') | pandas.concat |
"""
utility for working with DataFrames
"""
import pandas as pd
import numpy as np
class Edit:
"""
this class lets you edit a dataframe
"""
def __init__(self,df = pd.DataFrame(np.ones(5))):
self.df = df
def add_col(self,df,lst,name = "New_column"):
"""
this function will add a new column to the end of a dataframe
Parameters:
df: a dataframe
lst: a list of values to use as column cells
name: the name of the column
"""
series = | pd.Series(lst) | pandas.Series |
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
from seffaflik.elektrik import santraller as __santraller
__first_part_url = "production/"
def organizasyonlar():
"""
    Returns information on the organizations that can submit a Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ------------
    Returns
    -----------------
    Organizations that can submit KGÜP (Id, Adı, EIC Kodu, Kısa Adı, Durum)
"""
try:
particular_url = __first_part_url + "dpp-organization"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["organizations"])
df.rename(index=str,
columns={"organizationId": "Id", "organizationName": "Adı",
"organizationETSOCode": "EIC Kodu", "organizationShortName": "Kısa Adı",
"organizationStatus": "Durum"},
inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def organizasyon_veris_cekis_birimleri(eic):
"""
    For the given EIC value, returns the settlement-based injection/withdrawal unit (UEVÇB) information of the
    organization that can submit a Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ------------
    eic : organization EIC code as a string
    Returns
    -----------------
    UEVÇB information of the organization that can submit KGÜP (Id, Adı, EIC Kodu)
"""
if __dogrulama.__kgup_girebilen_organizasyon_dogrulama(eic):
try:
particular_url = __first_part_url + "dpp-injection-unit-name?organizationEIC=" + eic
json = __make_requests(particular_url)
df_unit = __pd.DataFrame(json["body"]["injectionUnitNames"])
df_unit.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu"}, inplace=True)
df_unit = df_unit[["Id", "Adı", "EIC Kodu"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df_unit
def tum_organizasyonlar_veris_cekis_birimleri():
"""
    Returns all organizations that can submit a Finalized Day-Ahead Production Plan (KGÜP) together with the
    settlement-based injection/withdrawal unit (UEVÇB) information of these organizations.
    Parameters
    ------------
    Returns
    -----------------
    Organizations that can submit KGÜP and their UEVÇB information (Org Id, Org Adı, Org EIC Kodu, Org Kısa Adı,
    Org Durum, UEVÇB Id, UEVÇB Adı, UEVÇB EIC Kodu)
"""
list_org = organizasyonlar()[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]].to_dict("records")
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.map(__organizasyon_cekis_birimleri, list_org, chunksize=1)
return __pd.concat(list_df_unit).reset_index(drop=True)
def kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
    Returns the resource-based finalized daily production plan (KGÜP) for the given date range.
    Note: if "organizasyon_eic" is given but "uevcb_eic" is not, the KGÜP is returned as the total over all
    UEVÇBs belonging to the organization. If both values are given, the KGÜP of the given UEVÇB of the given
    organization is returned.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    uevcb_eic        : UEVÇB EIC code as a string (default: "")
    Returns
    -----------------
    KGUP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
    Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "dpp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_organizasyonlar_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns, for the given date range, the hourly KGÜP values of all organizations that can submit a
    Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    Returns
    -----------------
    KGÜP values of the organizations that can submit KGÜP (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns, for the given date range, the hourly KGÜP values of the settlement-based injection/withdrawal
    units (UEVÇB) of all organizations that can submit a Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    Returns
    -----------------
    UEVÇB KGÜP values of the organizations that can submit KGÜP (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
    Returns the resource-based available capacity (EAK) for the given date range.
    Note: if "organizasyon_eic" is given but "uevcb_eic" is not, the EAK is returned as the total over all
    UEVÇBs belonging to the organization. If both values are given, the EAK of the given UEVÇB of the given
    organization is returned.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    uevcb_eic        : UEVÇB EIC code as a string (default: "")
    Returns
    -----------------
    EAK (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
    Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "aic" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = | __pd.DataFrame(json["body"]["aicList"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import json
import numpy as np
import pandas as pd
import sklearn
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LassoCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
# Access data store
data_store = pd.HDFStore("processed_data.h5")
# Retrieve data using key
match_df = data_store["preprocessed_df"]
data_store.close()
match_df.head()
match_df["Winner_team"] = match_df["Winner"]
for ind in match_df.index:
if match_df["Winner"][ind] == match_df["Team_1"][ind]:
match_df["Winner_team"][ind] = 1
elif match_df["Winner"][ind] == match_df["Team_2"][ind]:
match_df["Winner_team"][ind] = 2
else:
match_df["Winner_team"][ind] = 0
match_df["Winner_team"].value_counts()
match_df.head()
np.random.seed(60)
"""##Calculating Net Run Rate
###Import Data
"""
attributes = pd.read_csv("../Data/attributes.csv")
attributes.head()
scorecard = open("../Data/scorecard.json",)
scorecard_data = json.load(scorecard)
tmap = open("../Data/tmap.json",)
tmap_data = json.load(tmap)
"""###Get NNR"""
match = match_df.copy()
match["NRR_team1"] = ""
match["NRR_team2"] = ""
name_to_code = {
"Afghanistan": "AFG",
"Australia": "AUS",
"Bangladesh": "BAN",
"England": "ENG",
"India": "IND",
"Ireland": "IRE",
"Kenya": "KEN",
"Netherlands": "NED",
"New Zealand": "NZL",
"Pakistan": "PAK",
"South Africa": "SAF",
"Scotland": "SCO",
"Sri Lanka": "SRL",
"West Indies": "WIN",
"Zimbabwe": "ZIM",
}
skip_keys = ["1282", "1761", "1765", "1770", "1862", "1866", "2528"]
def check_allOut(scorecard_data, matchCode, team_num):
bat = "BATTING" + str(team_num)
dismissal = [i[1] for i in scorecard_data[matchCode][bat]]
if "not out" in dismissal or "" in dismissal:
return False
return True
def get_totalOvers(scorecard_data, matchCode, team_num):
bat = "BATTING" + str(team_num)
balls = [i[3] for i in scorecard_data[matchCode][bat] if i[3] != -1]
overs = sum(balls) / 6
return overs
for ind in match.index:
if match["Winner_team"][ind] == 0:
match["NRR_team1"][ind] = 0
match["NNR_team2"][ind] = 0
else:
team_num = 2
match_code = str(match["MatchCode"][ind])
if match_code in skip_keys:
continue
order = scorecard_data[match_code]["ORDER"]
if name_to_code[order[1]] == match["Team_2"][ind]:
team_num = 1
runRate_team1 = match["Score_1"][ind] / 50
if check_allOut(scorecard_data, match_code, team_num):
runRate_team2 = match["Score_2"][ind] / 50
else:
if match["Winner_team"][ind] == 2:
runRate_team2 = match["Score_2"][ind] / get_totalOvers(
scorecard_data, match_code, team_num
)
else:
runRate_team2 = match["Score_2"][ind] / 50
match["NRR_team1"][ind] = runRate_team1 - runRate_team2
match["NRR_team2"][ind] = runRate_team2 - runRate_team1
match.head()
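# Quick sanity check of the run-rate logic above (made-up scores): if team 1 scores 250 in
# its full 50 overs (5.0 runs per over) and team 2 is bowled out for 200, team 2 is charged
# the full 50 overs (4.0 runs per over), so NRR_team1 = 5.0 - 4.0 = +1.0 and
# NRR_team2 = -1.0.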
len(match)
match = match[~match["MatchCode"].isin(skip_keys)]
len(match)
"""###Store the NNR dataframe"""
# Save the final dataframe
data_store = pd.HDFStore("nnr_data.h5")
data_store["preprocessed_df"] = match
data_store.close()
"""#Flipped Dataset"""
# Access data store
data_store = | pd.HDFStore("nnr_data.h5") | pandas.HDFStore |
"""
Load the JSON and attach the captions
"""
import json
import pandas as pd
path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json'
with open(path) as question:
question = json.load(question)
# question['questions'][0]
# question['questions'][1]
# question['questions'][2]
df = pd.DataFrame(question['questions'])
df
caption_path = './datasets/caption/vis_st_trainval.json'
with open(caption_path) as cap:
cap = json.load(cap)
df_cap = pd.DataFrame(cap)
df_cap
df_addcap = pd.merge(df, df_cap, how='left', on='image_id')
del df_addcap['file_path']
########################################################################################################################
"""
pandas to json
"""
df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table')
with open('./datasets/caption/train_cap2.json') as train_cap:
train_cap = json.load(train_cap)
########################################################################################################################
########################################################################################################################
"""
answer + cap
"""
path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json'
path = './datasets/vqa/v2_mscoco_val2014_annotations.json'
with open(path) as answer:
answer = json.load(answer)
answer['annotations'][0]
df_ans = | pd.DataFrame(answer['annotations']) | pandas.DataFrame |
# Import libraries
import os
import pandas as pd
import numpy as np
from data.rle_encode import rle_encode
from data.dicom_reader import *
from skimage.io import imread
import math
# Set the paths (may need to be adjusted depending on folder location; currently based on the Desktop)
path_nrm = "./data/dataset512/train"
path_test = "./data/dataset512/test"
path_test_mask = "./data/dataset512/mask_predicted"
file_list = os.listdir(path_nrm)
file_list_test = os.listdir(path_test)
file_list_test_mask = os.listdir(path_test_mask)
# fname,fold,exist_labels
# 1.2.276.0.7230010.3.1.4.8323329.1000.1517875165.878027.png,0,0
# 1.2.276.0.7230010.3.1.4.8323329.10001.1517875220.930580.png,4,0
# Create the data frame
df1 = pd.DataFrame(file_list, columns=['fname'])
df1['fold'] = 0
df1['exist_labels'] = 1
for i in range(len(df1)):
df1.loc[i, 'fold'] = i%5
# Save as a csv file
df1 = df1.reset_index(drop=True)
df1.to_csv('./train_folds_5.csv', index=False)
# ImageId,EncodedPixels
# case183_1,1 1
# case183_2,1 1
df2 = | pd.DataFrame(file_list_test, columns=['ImageId']) | pandas.DataFrame |
# coding: utf-8
# # Project One: Data Visualization, Descriptive Statistics, Confidence Intervals
#
# This notebook contains the step-by-step directions for Project One. It is very important to run through the steps in order. Some steps depend on the outputs of earlier steps. Once you have completed the steps in this notebook, be sure to write your summary report.
#
#
# You are a data analyst for a basketball team and have access to a large set of historical data that you can use to analyze performance patterns. The coach of the team and your management have requested that you use descriptive statistics and data visualization techniques to study distributions of key performance metrics that are included in the data set. These data-driven analytics will help make key decisions to improve the performance of the team. You will use the Python programming language to perform the statistical analyses and then prepare a report of your findings to present for the team’s management. Since the managers are not data analysts, you will need to interpret your findings and describe their practical implications.
#
#
# There are four important variables in the data set that you will study in Project One.
#
# | <div style="text-align: left"> Variable </div> | <div style="text-align: left"> What does it represent? </div> |
# | -- | -- |
# | <div style="text-align: left"> pts </div> | <div style="text-align: left"> Points scored by the team in a game </div> |
# | <div style="text-align: left"> elo_n </div> | <div style="text-align: left"> A measure of the relative skill level of the team in the league </div> |
# | <div style="text-align: left"> year_id </div> | <div style="text-align: left"> Year when the team played the games </div> |
# | <div style="text-align: left"> fran_id </div> | <div style="text-align: left"> Name of the NBA team </div> |
#
#
# The ELO rating, represented by the variable **elo_n**, is used as a measure of the relative skill of a team. This measure is inferred based on the final score of a game, the game location, and the outcome of the game relative to the probability of that outcome. The higher the number, the higher the relative skill of a team.
#
#
# In addition to studying data on your own team, your management has assigned you a second team so that you can compare its performance with your own team's.
#
# | <div style="text-align: left"> Team </div> | <div style="text-align: left"> What does it represent? </div> |
# | -- | -- |
# | <div style="text-align: left"> Your Team </div> | <div style="text-align: left"> This is the team that has hired you as an analyst. This is the team that you will pick below. See Step 2.</div> |
# | <div style="text-align: left"> Assigned Team </div> | <div style="text-align: left"> This is the team that the management has assigned to you to compare against your team. See Step 1. </div> |
#
#
# Reminder: It may be beneficial to review the summary report template for Project One prior to starting this Python script. That will give you an idea of the questions you will need to answer with the outputs of this script.
#
#
# **--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------**
# ## Step 1: Data Preparation & the Assigned Team
# This step uploads the data set from a CSV file. It also selects the assigned team for this analysis. Do not make any changes to the code block below.
#
# 1. The **assigned team** is the <font color='blue'><strong>Chicago Bulls</strong></font> from the years <font color='blue'><strong>1996-1998</strong> </font>
#
# Click the block of code below and hit the **Run** button above.
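# As a purely illustrative aside (separate from the notebook cells that follow), the general idea
# behind an ELO-style update can be sketched in a few lines of Python. This is NOT the formula used
# to generate the **elo_n** column in the data set; the k-factor of 20 is an assumed value.

def illustrative_elo_update(team_elo, opponent_elo, team_won, k=20):
    """Return an updated ELO-style rating after one game (illustration only)."""
    # Expected win probability from the standard logistic ELO formula
    expected = 1.0 / (1.0 + 10 ** ((opponent_elo - team_elo) / 400.0))
    return team_elo + k * ((1.0 if team_won else 0.0) - expected)

# Example: a 1550-rated team that beats a 1600-rated team gains roughly 11 rating points.
# illustrative_elo_update(1550, 1600, True) -> ~1561.4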
# In[1]:
import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
from IPython.display import display, HTML
nba_orig_df = | pd.read_csv('nbaallelo.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
data = | pd.read_csv('201213177_data.csv', engine='python') | pandas.read_csv |
"""Mid-level helper functions for AutoTS."""
import random
import numpy as np
import pandas as pd
import datetime
import json
from hashlib import md5
from autots.evaluator.metrics import PredictionEval
from autots.tools.transform import RandomTransform, GeneralTransformer, shared_trans
from autots.models.ensemble import EnsembleForecast, generalize_horizontal
from autots.models.model_list import no_params, recombination_approved, no_shared
from itertools import zip_longest
from autots.models.basics import (
MotifSimulation,
LastValueNaive,
AverageValueNaive,
SeasonalNaive,
ZeroesNaive,
)
from autots.models.statsmodels import (
GLS,
GLM,
ETS,
ARIMA,
UnobservedComponents,
DynamicFactor,
VAR,
VECM,
VARMAX,
)
def create_model_id(
model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {}
):
"""Create a hash ID which should be unique to the model parameters."""
str_repr = (
str(model_str) + json.dumps(parameter_dict) + json.dumps(transformation_dict)
)
str_repr = ''.join(str_repr.split())
hashed = md5(str_repr.encode('utf-8')).hexdigest()
return hashed
def ModelMonster(
model: str,
parameters: dict = {},
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
startTimeStamps=None,
forecast_length: int = 14,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
**kwargs,
):
"""Directs strings and parameters to appropriate model objects.
Args:
model (str): Name of Model Function
parameters (dict): Dictionary of parameters to pass through to model
"""
model = str(model)
if model == 'ZeroesNaive':
return ZeroesNaive(frequency=frequency, prediction_interval=prediction_interval)
elif model == 'LastValueNaive':
return LastValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
elif model == 'AverageValueNaive':
return AverageValueNaive(
frequency=frequency, prediction_interval=prediction_interval, **parameters
)
elif model == 'SeasonalNaive':
return SeasonalNaive(
frequency=frequency, prediction_interval=prediction_interval, **parameters
)
elif model == 'GLS':
return GLS(frequency=frequency, prediction_interval=prediction_interval)
elif model == 'GLM':
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'ETS':
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'ARIMA':
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'FBProphet':
from autots.models.prophet import FBProphet
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'RollingRegression':
from autots.models.sklearn import RollingRegression
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'UnobservedComponents':
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'DynamicFactor':
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
**parameters,
)
return model
elif model == 'VAR':
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
**parameters,
)
return model
elif model == 'VECM':
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
**parameters,
)
return model
elif model == 'VARMAX':
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
**parameters,
)
return model
elif model == 'GluonTS':
from autots.models.gluonts import GluonTS
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
**parameters,
)
return model
elif model == 'TSFreshRegressor':
from autots.models.tsfresh import TSFreshRegressor
if parameters == {}:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
**parameters,
)
else:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
max_timeshift=parameters['max_timeshift'],
regression_model=parameters['regression_model'],
feature_selection=parameters['feature_selection'],
)
return model
elif model == 'MotifSimulation':
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'WindowRegression':
from autots.models.sklearn import WindowRegression
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
**parameters,
)
return model
elif model == 'TensorflowSTS':
from autots.models.tfp import TensorflowSTS
if parameters == {}:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
seasonal_periods=parameters['seasonal_periods'],
ar_order=parameters['ar_order'],
trend=parameters['trend'],
fit_method=parameters['fit_method'],
num_steps=parameters['num_steps'],
)
return model
elif model == 'TFPRegression':
from autots.models.tfp import TFPRegression
if parameters == {}:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
kernel_initializer=parameters['kernel_initializer'],
epochs=parameters['epochs'],
batch_size=parameters['batch_size'],
optimizer=parameters['optimizer'],
loss=parameters['loss'],
dist=parameters['dist'],
regression_type=parameters['regression_type'],
)
return model
elif model == 'ComponentAnalysis':
from autots.models.sklearn import ComponentAnalysis
if parameters == {}:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
model=parameters['model'],
model_parameters=parameters['model_parameters'],
decomposition=parameters['decomposition'],
n_components=parameters['n_components'],
forecast_length=forecast_length,
)
return model
elif model == 'DatepartRegression':
from autots.models.sklearn import DatepartRegression
model = DatepartRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
**parameters,
)
return model
else:
raise AttributeError(
("Model String '{}' not a recognized model type").format(model)
)
def ModelPrediction(
df_train,
forecast_length: int,
transformation_dict: dict,
model_str: str,
parameter_dict: dict,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Feed parameters into modeling pipeline
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
n_jobs (int): number of processes
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
transformationStartTime = datetime.datetime.now()
transformer_object = GeneralTransformer(**transformation_dict)
df_train_transformed = transformer_object._fit(df_train)
# make sure regressor has same length. This could be a problem if wrong size regressor is passed.
if len(future_regressor_train) > 0:
future_regressor_train = future_regressor_train.tail(
df_train_transformed.shape[0]
)
transformation_runtime = datetime.datetime.now() - transformationStartTime
# from autots.evaluator.auto_model import ModelMonster
model = ModelMonster(
model_str,
parameters=parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
df_forecast = model.predict(
forecast_length=forecast_length, future_regressor=future_regressor_forecast
)
# THIS CHECKS POINT FORECAST FOR NULLS BUT NOT UPPER/LOWER FORECASTS
if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
raise ValueError(
"Model {} returned NaN for one or more series".format(model_str)
)
# CHECK Forecasts are proper length!
if df_forecast.forecast.shape[0] != forecast_length:
raise ValueError(f"Model {model_str} returned improper forecast_length")
transformationStartTime = datetime.datetime.now()
# Inverse the transformations, NULL FILLED IN UPPER/LOWER ONLY
df_forecast.forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.forecast)
)
df_forecast.lower_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.lower_forecast, fillzero=True)
)
df_forecast.upper_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.upper_forecast, fillzero=True)
)
df_forecast.transformation_parameters = transformation_dict
# Remove negatives if desired
# There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower = 0), not sure which faster
if no_negatives:
df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
df_forecast.forecast = df_forecast.forecast.clip(lower=0)
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
if constraint is not None:
if verbose > 2:
print("Using constraint.")
constraint = float(constraint)
train_std = df_train.std(axis=0)
train_min = df_train.min(axis=0) - (constraint * train_std)
train_max = df_train.max(axis=0) + (constraint * train_std)
df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
transformation_runtime = transformation_runtime + (
datetime.datetime.now() - transformationStartTime
)
df_forecast.transformation_runtime = transformation_runtime
return df_forecast
class TemplateEvalObject(object):
"""Object to contain all your failures!."""
def __init__(
self,
model_results=pd.DataFrame(),
per_timestamp_smape=pd.DataFrame(),
per_series_mae=pd.DataFrame(),
per_series_spl=pd.DataFrame(),
per_series_rmse1=pd.DataFrame(),
per_series_rmse2=pd.DataFrame(),
model_count: int = 0,
):
self.model_results = model_results
self.model_count = model_count
self.per_series_mae = per_series_mae
self.per_series_spl = per_series_spl
self.per_series_rmse1 = per_series_rmse1
self.per_series_rmse2 = per_series_rmse2
self.per_timestamp_smape = per_timestamp_smape
def __repr__(self):
"""Print."""
return 'Results objects, result table at self.model_results (pd.df)'
def concat(self, another_eval):
"""Merge another TemplateEvalObject onto this one."""
self.model_results = pd.concat(
[self.model_results, another_eval.model_results],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
self.per_series_mae = pd.concat(
[self.per_series_mae, another_eval.per_series_mae], axis=0, sort=False
)
self.per_series_spl = pd.concat(
[self.per_series_spl, another_eval.per_series_spl], axis=0, sort=False
)
self.per_series_rmse1 = pd.concat(
[self.per_series_rmse1, another_eval.per_series_rmse1], axis=0, sort=False
)
self.per_series_rmse2 = pd.concat(
[self.per_series_rmse2, another_eval.per_series_rmse2], axis=0, sort=False
)
self.per_timestamp_smape = pd.concat(
[self.per_timestamp_smape, another_eval.per_timestamp_smape],
axis=0,
sort=False,
)
self.model_count = self.model_count + another_eval.model_count
return self
def save(self, filename):
"""Save results to a file."""
if '.csv' in filename:
self.model_results.to_csv(filename, index=False)
elif '.pickle' in filename:
import pickle
with open(filename, "wb") as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
else:
raise ValueError("filename not .csv or .pickle")
def unpack_ensemble_models(
template,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
keep_ensemble: bool = True,
recursive: bool = False,
):
"""Take ensemble models from template and add as new rows.
Args:
template (pd.DataFrame): AutoTS template containing template_cols
keep_ensemble (bool): if False, drop row containing original ensemble
recursive (bool): if True, unnest ensembles of ensembles...
"""
ensemble_template = pd.DataFrame()
template['Ensemble'] = np.where(
((template['Model'] == 'Ensemble') & (template['Ensemble'] < 1)),
1,
template['Ensemble'],
)
for index, value in template[template['Ensemble'] != 0][
'ModelParameters'
].iteritems():
model_dict = json.loads(value)['models']
model_df = pd.DataFrame.from_dict(model_dict, orient='index')
model_df = model_df.rename_axis('ID').reset_index(drop=False)
model_df['Ensemble'] = 0
# unpack nested ensembles, if recursive specified
if recursive and 'Ensemble' in model_df['Model'].tolist():
model_df = pd.concat(
[
unpack_ensemble_models(
model_df, recursive=True, template_cols=template_cols
),
model_df,
],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
ensemble_template = pd.concat(
[ensemble_template, model_df], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
if not keep_ensemble:
template = template[template['Ensemble'] == 0]
template = pd.concat(
[template, ensemble_template], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
template = template.drop_duplicates(subset=template_cols)
return template
def PredictWitch(
template,
df_train,
forecast_length: int,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
horizontal_subset: list = None,
):
"""Takes numeric data, returns numeric forecasts.
Only one model (albeit potentially an ensemble)!
Well, she turned me into a newt.
A newt?
I got better. -Python
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
template_cols (list): column names of columns used as model template
horizontal_subset (list): columns of df_train to use for forecast, meant for horizontal ensembling
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object):
"""
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
template = template.head(1)
for index_upper, row_upper in template.iterrows():
# if an ensemble
if row_upper['Model'] == 'Ensemble':
forecasts_list = []
forecasts_runtime = {}
forecasts = {}
upper_forecasts = {}
lower_forecasts = {}
ens_model_str = row_upper['Model']
ens_params = json.loads(row_upper['ModelParameters'])
ens_template = unpack_ensemble_models(
template, template_cols, keep_ensemble=False, recursive=False
)
# horizontal generalization
if str(row_upper['Ensemble']) == '2':
available_models = list(ens_params['models'].keys())
known_matches = ens_params['series']
all_series = generalize_horizontal(
df_train, known_matches, available_models
)
else:
all_series = None
total_ens = ens_template.shape[0]
for index, row in ens_template.iterrows():
# recursive recursion!
try:
if all_series is not None:
test_mod = row['ID']
horizontal_subset = [
ser for ser, mod in all_series.items() if mod == test_mod
]
df_forecast = PredictWitch(
row,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
horizontal_subset=horizontal_subset,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
forecasts_list.extend([model_id])
forecasts_runtime[model_id] = total_runtime
forecasts[model_id] = df_forecast.forecast
upper_forecasts[model_id] = df_forecast.upper_forecast
lower_forecasts[model_id] = df_forecast.lower_forecast
# print(f"{ens_params['model_name']} with shape {df_forecast.forecast.shape}")
if verbose >= 2:
p = f"Ensemble {ens_params['model_name']} component {index + 1} of {total_ens} succeeded"
print(p)
except Exception as e:
# currently this leaves no key/value for models that fail
if verbose >= 1: # 1
p = f"FAILED: Ensemble {ens_params['model_name']} component {index} of {total_ens} with error: {repr(e)}"
print(p)
ens_forecast = EnsembleForecast(
ens_model_str,
ens_params,
forecasts_list=forecasts_list,
forecasts=forecasts,
lower_forecasts=lower_forecasts,
upper_forecasts=upper_forecasts,
forecasts_runtime=forecasts_runtime,
prediction_interval=prediction_interval,
df_train=df_train,
prematched_series=all_series,
)
return ens_forecast
# if not an ensemble
else:
model_str = row_upper['Model']
parameter_dict = json.loads(row_upper['ModelParameters'])
transformation_dict = json.loads(row_upper['TransformationParameters'])
if (
horizontal_subset is not None
and model_str in no_shared
and all(
trs not in shared_trans
for trs in list(transformation_dict['transformations'].values())
)
):
df_train_low = df_train.reindex(copy=True, columns=horizontal_subset)
# print(f"Reducing to subset for {model_str} with {df_train_low.columns}")
else:
df_train_low = df_train.copy()
df_forecast = ModelPrediction(
df_train_low,
forecast_length,
transformation_dict,
model_str,
parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
grouping_ids=grouping_ids,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
startTimeStamps=startTimeStamps,
n_jobs=n_jobs,
)
return df_forecast
def TemplateWizard(
template,
df_train,
df_test,
weights,
model_count: int = 0,
ensemble: str = True,
forecast_length: int = 14,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
validation_round: int = 0,
current_generation: int = 0,
max_generations: int = 0,
model_interrupt: bool = False,
grouping_ids=None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Take Template, returns Results.
There are some who call me... Tim. - Python
Args:
template (pandas.DataFrame): containing model str, and json of transformations and hyperparamters
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
df_test (pandas.DataFrame): dataframe of actual values of (forecast length * n series)
weights (dict): key = column/series_id, value = weight
ensemble (str): desc of ensemble types to prepare metric collection
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
validation_round (int): int passed to record current validation.
current_generation (int): info to pass to print statements
max_generations (int): info to pass to print statements
model_interrupt (bool): if True, keyboard interrupts are caught and only break current model eval.
template_cols (list): column names of columns used as model template
Returns:
TemplateEvalObject
"""
ensemble = str(ensemble)
template_result = TemplateEvalObject()
template_result.model_count = model_count
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
# template = unpack_ensemble_models(template, template_cols, keep_ensemble = False)
for index, row in template.iterrows():
try:
model_str = row['Model']
parameter_dict = json.loads(row['ModelParameters'])
transformation_dict = json.loads(row['TransformationParameters'])
ensemble_input = row['Ensemble']
current_template = pd.DataFrame(row).transpose()
template_result.model_count += 1
if verbose > 0:
if validation_round >= 1:
base_print = (
"Model Number: {} of {} with model {} for Validation {}".format(
str(template_result.model_count),
template.shape[0],
model_str,
str(validation_round),
)
)
else:
base_print = (
"Model Number: {} with model {} in generation {} of {}".format(
str(template_result.model_count),
model_str,
str(current_generation),
str(max_generations),
)
)
if verbose > 1:
print(
base_print
+ " with params {} and transformations {}".format(
json.dumps(parameter_dict),
json.dumps(transformation_dict),
)
)
else:
print(base_print)
df_forecast = PredictWitch(
current_template,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
per_ts = True if 'distance' in ensemble else False
if 'hdist' in ensemble:
dist_n = int(np.ceil(0.3 * forecast_length))
else:
dist_n = None
model_error = PredictionEval(
df_forecast,
df_test,
series_weights=weights,
df_train=df_train,
per_timestamp_errors=per_ts,
dist_n=dist_n,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
result = pd.DataFrame(
{
'ID': model_id,
'Model': df_forecast.model_name,
'ModelParameters': json.dumps(df_forecast.model_parameters),
'TransformationParameters': json.dumps(
df_forecast.transformation_parameters
),
'TransformationRuntime': df_forecast.transformation_runtime,
'FitRuntime': df_forecast.fit_runtime,
'PredictRuntime': df_forecast.predict_runtime,
'TotalRuntime': total_runtime,
'Ensemble': ensemble_input,
'Exceptions': np.nan,
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
a = pd.DataFrame(
model_error.avg_metrics_weighted.rename(lambda x: x + '_weighted')
).transpose()
result = pd.concat(
[result, pd.DataFrame(model_error.avg_metrics).transpose(), a], axis=1
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
if 'horizontal' in ensemble:
cur_mae = model_error.per_series_metrics.loc['mae']
cur_mae = pd.DataFrame(cur_mae).transpose()
cur_mae.index = [model_id]
template_result.per_series_mae = pd.concat(
[template_result.per_series_mae, cur_mae], axis=0
)
if 'probabilistic' in ensemble:
cur_spl = model_error.per_series_metrics.loc['spl']
cur_spl = | pd.DataFrame(cur_spl) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
| DatetimeTZDtype(tz="US/Eastern") | pandas.core.dtypes.dtypes.DatetimeTZDtype |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 18:41:53 2020
@author: Danish
"""
import pandas as pd
import os
from utilities import to_weeks, extract_sub_df
import numpy as np
import pickle
path = r'C:\Users\danis\Documents\USFoods'
csv_files = os.listdir(path+'/COVID')
# remove the first file, which is not a CSV
csv_files.pop(0)
#reading all the csv files
covid_df = []
for f in csv_files:
df = pd.read_csv(path+'/COVID/'+f)
covid_df.append(df)
covid_df1 = covid_df.pop(18)
covid_df = pd.concat(covid_df)
############################ Converting Dates to Weeks ############################
#converting date to weeks
date = covid_df.date.copy()
date = date.reset_index(drop=True)
weeks0 = to_weeks(date, format_='%Y%m%d', splitter='-', year_index=0)
covid_df['fisc_wk'] = weeks0
#converting date to weeks
date = covid_df1.date.copy()
date = date.reset_index(drop=True)
weeks1 = to_weeks(date, format_='%d%m%Y', splitter='/', year_index=2)
covid_df1['fisc_wk'] = weeks1
############################ Adding zip codes ############################
#concatenated DF
covid_df = pd.concat([covid_df, covid_df1])
covid_df = covid_df.reset_index(drop=True)
county = covid_df.county.unique()
zip_df = | pd.read_csv(path+'/zip_to_county.csv') | pandas.read_csv |
import pandas as pd
import glob, os
config = dict(
safegraph_data_path = '~/safegraph_data/'
)
joined_df = pd.read_pickle('data/us_data_with_latent_populations.pkl')
joined_df = | pd.read_pickle('joined_df_test_us.pkl') | pandas.read_pickle |
import pandas as pd
from tabulate import tabulate
import pprint
class System:
def __init__(self, api):
""" Gets information on system information like notifications, status codes, and metrics
:param api: api authentication using the Alooma package
"""
self.api = api
def get_status_codes(self):
""" Gets the possible status codes from Alooma
:return: A dictionary with status codes and their descriptions
"""
return self.api.get_samples_status_codes()
def status_code_info(self, print_format='table', table_limit=None, pprint_indent=2, pprint_width=20, pprint_depth=5):
""" Prints the status codes that Alooma may return from with the event info
:param print_format: string 'table' to print event info as tables or 'json' to print as dictionaries
:param table_limit: A limit to the columns to print
:param pprint_indent: The indent value to pprint dictionaries
:param pprint_width: The width value to pprint dictionaries
:param pprint_depth: The depth value to pprint dictionaries
:return: A dictionary with the status codes and their descriptions
"""
status_codes = self.get_status_codes()
if print_format == 'table':
df = pd.Series(status_codes).reset_index()
df.columns = ['Status Code', 'Status Description']
print (tabulate(df.head(n=table_limit), headers='keys', tablefmt='psql', showindex=True))
elif print_format == 'json':
pprint.pprint(status_codes, indent=pprint_indent, width=pprint_width, depth=pprint_depth)
return status_codes
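    # Illustrative usage (assumes `api` is an authenticated connection object created with the
    # Alooma package elsewhere in the application):
    #
    #   system = System(api)
    #   codes = system.status_code_info(print_format='table', table_limit=20)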
def system_metric_info(self, metric_type_names=None, last_n_minutes=60):
""" Gets the system metrics and prints a dataframe with the results
        :param metric_type_names: string or list. A list of system metrics or a single metric name.
        :param last_n_minutes: The length of time in minutes, counting back from the current time, to retrieve metrics for
        :return: A dataframe with the information
"""
metric_names = ['EVENTS_IN_PIPELINE',
'UNMAPPED_EVENTS',
'IGNORED_EVENTS',
'ERROR_EVENTS',
'LOADED_EVENTS_RATE']
if not metric_type_names:
metric_type_names = metric_names
system_metrics = self.api.get_metrics_by_names(metric_names=metric_type_names, minutes=last_n_minutes)
lst = []
for metrics in system_metrics:
row_dict = {'target': metrics['target'],
'value1': metrics['datapoints'][0][0],
'timestamp1': | pd.to_datetime(metrics['datapoints'][0][1], unit='s') | pandas.to_datetime |
"""
Script for exploring ESGF results.
"""
import json
import re
import pandas as pd
def split_esgf_string(model_data):
"""
    Use re.split because str.split accepts only a single separator string.
:param model_data:
:return:
"""
    model_data = re.split(r'\.|\|', model_data)
return model_data
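# For example, on a hypothetical ESGF id string:
#   split_esgf_string('CMIP6.CMIP.NASA-GISS.GISS-E2-1-G|esgf-data.gsfc.nasa.gov')
#   -> ['CMIP6', 'CMIP', 'NASA-GISS', 'GISS-E2-1-G', 'esgf-data', 'gsfc', 'nasa', 'gov']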
if __name__ == '__main__':
# Read in the ESGF results
with open("data_json/ESGF_results_2020-07-25.json", "r") as read_file:
esgf_data = json.load(read_file)
# first look at this dictionary - they're nested
# esgf_data.keys()
# an array of dictionaries
esgf_holdings_master_list = | pd.DataFrame.from_dict(esgf_data['response']['docs']) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
import pickle
from .utils import *
def predNextDays(optmod_name, opt_mod, var_name, pred_days):
pred = (opt_mod[optmod_name]['mod_data'][var_name])[opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period'] -1 :opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period']+pred_days]
print("Mod: %s \t Next days: %s: \t %s" %(optmod_name, var_name, str([int(x) for x in pred])))
print("Mod: %s \t Variation: %s: \t %s" %(optmod_name, var_name, str([int(x) for x in (pred[1:len(pred)] - pred[0:len(pred)-1])])))
class ModelStats:
def __init__(self, model, act_data, pred_days = 10):
self.model = model
self.act_data = act_data
self.data = pd.DataFrame(self.calcData())
self.data.set_index("date", inplace=True)
def printKpi(self, date, kpi_name, title, num_format = 'd', bperc = False):
var_uff = "uff_" + kpi_name
var_mod = "mod_" + kpi_name
if "uff_" + kpi_name in self.data.columns.tolist():
#print(("%30s: %7" + num_format + " vs %7" + num_format + " (%5" + num_format + " vs %5" + num_format + "), errore: %" + num_format + "") %(
print(("%30s: %7s vs %7s (%5s vs %5s), errore: %s") %(
title,
format_number(self.data[var_uff][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_uff][np.datetime64(date, 'D')] - self.data[var_uff][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc),
format_number(self.data[var_uff][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc)
))
else:
#print(("%30s: %7" + num_format + " (%5" + num_format + ")") %(
print(("%30s: %7s (%5s)") %(
title,
format_number(self.data[var_mod][np.datetime64(date, 'D')], bperc = bperc),
format_number(self.data[var_mod][np.datetime64(date, 'D')] - self.data[var_mod][np.datetime64(date, 'D') - np.timedelta64(1, 'D')], bperc = bperc)
))
def printKpis(self, date):
self.printKpi(date, 'Igc_cum', "Tot Infected")
self.printKpi(date, 'Igc', "Currently Infected")
self.printKpi(date, 'Igci_t', "Currently in Int. Care")
self.printKpi(date, 'Gc_cum', "Tot Recovered")
self.printKpi(date, 'M_cum', "Tot Dead")
print()
self.printKpi(date, 'Igc_cum_pinc', "% Increase, Infected", num_format=".3f", bperc = True)
        self.printKpi(date, 'ratio_Gc_Igc', "% Known Recovery Rate", num_format=".3f", bperc = True)
        self.printKpi(date, 'ratio_M_Igc', "% Mortality Rate", num_format=".3f", bperc = True)
print()
self.printKpi(date, 'ratio_Gccum_Igccum', "% Recovered / Tot", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Mcum_Igccum', "% Dead / Tot", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Igci_Igc', "% Intensive Care", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_Igcn_Igc', "% Non Intensive Care", num_format=".3f", bperc = True)
self.printKpi(date, 'ratio_I_Igc', "% Total Infected / Known Infected", num_format=".3f", bperc = True)
self.printKpi(date, 'R0_t', "R0", num_format=".3f")
print()
print()
print("*** 7 days ahead predictions ***")
        self.printPredict(date, 'Igc_cum', "Tot Infected", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'Igc', "Currently Infected", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'Igci_t', "Currently in Int. Care", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'Gc_cum', "Tot Recovered", pred_step = 7, bperc = False)
        print()
        self.printPredict(date, 'M_cum', "Tot Dead", pred_step = 7, bperc = False)
def printPredict(self, curr_date, kpi_name, title, pred_step = 7, bperc = False):
var_mod = "mod_" + kpi_name
data = self.data[var_mod][np.datetime64(curr_date, 'D') : np.datetime64(np.datetime64(curr_date, 'D') + np.timedelta64(pred_step, 'D'))]
data_delta = pd.Series(data).diff(1)
data_str = "["
for val in data:
data_str = " " + data_str + " {:7s}".format(format_number(val)) + " "
data_str = data_str + "]"
data_delta_str = "["
for val in data_delta:
#data_delta_str = " " + data_delta_str + " {:7s}".format(format_number(val)) + " "
#print(val)
#if math.isfinite(val):
data_delta_str = " " + data_delta_str + " {:7s}".format(str(format_number(val))) + " "
#else:
# data_delta_str = " " + data_delta_str + " {:7s}".format("0") + " "
data_delta_str = data_delta_str + "]"
print(("%30s: %60s") %(
title,
data_str
))
print(("%30s: %60s") %(
"Var.",
data_delta_str
))
def calcData(self):
def calcDataVar(data):
istart = self.model['i_start']
#iend = istart + len(data)
mod_len = len(self.model['mod_data']['dat_Igc'])
#return [np.NaN for i in range (0, istart)] + data.tolist() + [np.NaN for i in range(istart + len(data) -1, mod_len-1)]
return [np.NaN for i in range (0, istart)] + data.tolist()[self.act_data.i_start:] + [np.NaN for i in range(istart + len(data[self.act_data.i_start:]) -1, mod_len-1)]
def calcDataVarDate(data):
istart = self.model['i_start']
mod_len = len(self.model['mod_data']['dat_Igc'])
#first_date = data[0] - np.timedelta64(istart, 'D')
first_date = data[self.act_data.i_start] - np.timedelta64(istart, 'D')
return [np.datetime64(first_date + np.timedelta64(i, 'D'), 'D') for i in range (0, mod_len)]
uff_Igci_t = calcDataVar(self.act_data.dat_Igci_t)
uff_Igcn_t = calcDataVar(self.act_data.dat_Igcn_t)
uff_Igc = calcDataVar(self.act_data.dat_Igc)
uff_Igc_cum = calcDataVar(self.act_data.dat_Igc_cum)
uff_Gc_cum = calcDataVar(self.act_data.dat_Gc_cum)
uff_M_cum = calcDataVar(self.act_data.dat_M_cum)
uff_Gc = [np.NaN] + np.diff(uff_Gc_cum).tolist()
uff_M = [np.NaN] + np.diff(uff_M_cum).tolist()
uff_Igc_cum_pinc = (pd.Series(uff_Igc_cum)/pd.Series(uff_Igc_cum).shift(1)) - 1
uff_ratio_Gc_Igc = ( | pd.Series(uff_Gc) | pandas.Series |
#!/usr/bin/env python3
"""Script to perform the group analysis.
Creates the figures 3 and 4 from the paper
References:
https://towardsdatascience.com/an-introduction-to-the-bootstrap-method-58bcb51b4d60
https://machinelearningmastery.com/calculate-bootstrap-confidence-intervals-machine-learning-results-python/
https://stats.stackexchange.com/questions/186337/average-roc-for-repeated-10-fold-cross-validation-with-probability-estimates
"""
import argparse
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn.metrics import roc_curve, auc
from tqdm import tqdm
from utils import COLUMNS_NAME, load_dataset, cliff_delta
PROJECT_ROOT = Path.cwd()
def compute_brain_regions_deviations(diff_df, clinical_df, disease_label, hc_label=1):
""" Calculate the Cliff's delta effect size between groups."""
region_df = pd.DataFrame(columns=['regions', 'pvalue', 'effect_size'])
diff_hc = diff_df.loc[clinical_df['Diagn'] == disease_label]
diff_patient = diff_df.loc[clinical_df['Diagn'] == hc_label]
for region in COLUMNS_NAME:
_, pvalue = stats.mannwhitneyu(diff_hc[region], diff_patient[region])
effect_size = cliff_delta(diff_hc[region].values, diff_patient[region].values)
region_df = region_df.append({'regions': region, 'pvalue': pvalue, 'effect_size': effect_size},
ignore_index=True)
return region_df
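# `cliff_delta` is imported from this project's utils module. For reference, a minimal sketch of the
# standard definition (dominance of one sample over the other) might look like the function below;
# the project's actual helper may differ in implementation details.
def _cliff_delta_reference(x, y):
    """Proportion of (x, y) pairs with x > y minus the proportion with x < y (sketch only)."""
    x, y = np.asarray(x), np.asarray(y)
    greater = sum((xi > y).sum() for xi in x)
    less = sum((xi < y).sum() for xi in x)
    return (greater - less) / (len(x) * len(y))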
def compute_classification_performance(reconstruction_error_df, clinical_df, disease_label, hc_label=1):
""" Calculate the AUCs of the normative model."""
error_hc = reconstruction_error_df.loc[clinical_df['Diagn'] == hc_label]['Reconstruction error']
error_patient = reconstruction_error_df.loc[clinical_df['Diagn'] == disease_label]['Reconstruction error']
fpr, tpr, _ = roc_curve(list(np.zeros_like(error_hc)) + list(np.ones_like(error_patient)),
list(error_hc) + list(error_patient))
roc_auc = auc(fpr, tpr)
tpr = np.interp(np.linspace(0, 1, 101), fpr, tpr)
tpr[0] = 0.0
return roc_auc, tpr
def main(dataset_name, disease_label):
"""Perform the group analysis."""
# ----------------------------------------------------------------------------
n_bootstrap = 1000
model_name = 'supervised_aae'
participants_path = PROJECT_ROOT / 'data' / dataset_name / 'participants.tsv'
freesurfer_path = PROJECT_ROOT / 'data' / dataset_name / 'freesurferData.csv'
hc_label = 1
# ----------------------------------------------------------------------------
bootstrap_dir = PROJECT_ROOT / 'outputs' / 'bootstrap_analysis'
model_dir = bootstrap_dir / model_name
ids_path = PROJECT_ROOT / 'outputs' / (dataset_name + '_homogeneous_ids.csv')
# ----------------------------------------------------------------------------
clinical_df = load_dataset(participants_path, ids_path, freesurfer_path)
clinical_df = clinical_df.set_index('participant_id')
tpr_list = []
auc_roc_list = []
effect_size_list = []
for i_bootstrap in tqdm(range(n_bootstrap)):
bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)
output_dataset_dir = bootstrap_model_dir / dataset_name
output_dataset_dir.mkdir(exist_ok=True)
analysis_dir = output_dataset_dir / '{:02d}_vs_{:02d}'.format(hc_label, disease_label)
analysis_dir.mkdir(exist_ok=True)
# ----------------------------------------------------------------------------
normalized_df = pd.read_csv(output_dataset_dir / 'normalized.csv', index_col='participant_id')
reconstruction_df = | pd.read_csv(output_dataset_dir / 'reconstruction.csv', index_col='participant_id') | pandas.read_csv |
import pandas as pd
import os
df = pd.DataFrame(columns=["Server-RSSI-1", "Server-RSSI-2", "Server-RSSI-3", "Square"])
point_df = pd.DataFrame(columns=["Server-RSSI-1", "Server-RSSI-2", "Server-RSSI-3", "Square", "Point"])
for root, dirs, files in os.walk("."):
group_id = 0
for filename in files:
with open(filename, encoding="utf8", errors='ignore') as f:
data_point = 0
is_data = False
for ind, line in enumerate(f.readlines()):
if line.startswith("[INFO] RSSI sampling started"):
is_data = True
point_df = | pd.DataFrame(columns=["Server-RSSI-1", "Server-RSSI-2", "Server-RSSI-3", "Square", "Point"]) | pandas.DataFrame |
import pandas as pd
import re
from collections import OrderedDict
import time
#This file has various helper functions. Check out the README for the flow.
def helper_input_snt_to_tkn(snt):
step1 = []
for token in snt.split(' '):
handled = False
if '-' in token:
subkns = token.split('-')
for i in range(0,len(subkns) - 1):
step1.append(subkns[i])
step1.append('-')
step1.append(subkns[len(subkns) - 1])
handled = True
if not handled:
step1.append(token)
step2 = []
for token in step1:
m = re.search("^([0-9:\.,½¼¾⅛⅔⅓$¥€£]+)([/А-яа-яA-Za-z²³2\"\'\.\,]+)$", token)
if m:
num = m.group(1)
suffix = m.group(2)
step2.append(num)
step2.append(suffix)
else:
step2.append(token)
return step2
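# For example (illustrative input):
#   helper_input_snt_to_tkn("10:30-11:00am break")
#   -> ['10:30', '-', '11:00', 'am', 'break']
# i.e. hyphenated tokens are split around '-', and numeric prefixes are separated from their
# alphabetic suffixes.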
def input_snt_to_tkn(in_file, resulting_in_file):
inData = open(in_file, "r+", encoding='utf-8').readlines()
outData = []
for snt in inData:
newSnt = []
for token in helper_input_snt_to_tkn(snt):
if not re.match("\<T[0-9]*\>", token) and not re.match("\</T[0-9]*\>", token) and \
re.match("^[A-Za-z0-9+-г\./]*$", token) or re.match("^[A-Za-z#0+-9½¼¾⅛⅔⅓_—\-\,\.\$¥€£\:\%\(\)\\\/]*$",
token) or re.match("^[А-Я]*$",token) or \
(re.match("^[А-Яа-я]*$", token) and (sum(1 for c in token if c.isupper()) > 2)) or \
re.match("^[А-Я]\.[А-Я]\.$", token) or re.match("^[А-Я]\.[А-Я]\.[А-Я]\.$", token):
newSnt = newSnt + list(token)
else:
newSnt = newSnt + [token]
outData.append(" ".join(newSnt))
res_out = open(resulting_in_file, "w+", encoding='utf-8')
res_out.writelines(outData)
res_out.close()
def service_match_tokens(sentence, source_tokens):
translated_tokens = {}
for tid in source_tokens.keys():
m = re.search('<T%d> (.+?) </T%d>'%(tid,tid), sentence)
if m:
token = m.group(1)
translated_tokens[tid] = token
else:
translated_tokens[tid] = source_tokens[tid]
if (len(translated_tokens) != len(source_tokens)):
print("Consistency issue %d vs. %d: %s"%(len(translated_tokens), len(source_tokens), sentence))
return translated_tokens
#NOTE: this doesn't do any work, since we changed the format.
def eliminate_unks(org, enc, trn):
if len(org) != len(trn) or len(trn) != len(enc):
print(
"Error: Provided file sizes(%d,%d,%d) are not equal. Check the parameters" % (len(org), len(enc), len(trn)))
exit(-1)
for i in range(len(org)):
org_tokens = org[i].split(" ")
enc_tokens = enc[i].split(" ")
trn_tokens = trn[i].split(" ")
unks = []
for j in range(len(enc_tokens)):
if enc_tokens[j] == "<unk>":
unks.append(org_tokens[j])
#print(unks)
upointer = 0
for j in range(len(trn_tokens)):
if trn_tokens[j] == "<unk>":
if upointer < len(unks):
trn_tokens[j] = unks[upointer]
else:
trn_tokens[j] = "" #""<unk %d>"%upointer #Ignore unmatched unks
upointer += 1
trn[i] = " ".join(trn_tokens)
return trn
def restore_from_translation(original_in_file, token_in_file, encoded_in_file, translation_file, output_file):
token_in = open(token_in_file, "r+", encoding='utf-8').read().splitlines()
encoded_in = open(encoded_in_file, "r+", encoding='utf-8').read().splitlines()
sentences = open(translation_file, "r+", encoding='utf-8').read().splitlines()
sentences = eliminate_unks(token_in, encoded_in, sentences)
original_input = pd.read_csv(original_in_file, encoding='utf-8')
valid_keys = set()
src_dataset = {}
for (sid, tid, before) in original_input[['sentence_id', 'token_id', 'before']].values:
if (not isinstance(before, str)):
before = str(before)
if sid not in src_dataset:
src_dataset[sid] = {};
src_dataset[sid][tid] = before
valid_keys.add("%d_%d"%(sid,tid))
print("Read original source.")
count2sid = list(src_dataset.keys())
result = []
now = time.time() * 1000
acc = 0;
for id, source_tokens in enumerate(src_dataset.values()):
acc += len(source_tokens)
matched_tokens = service_match_tokens(sentences[id], source_tokens)
for tid, token in matched_tokens.items():
cid = "%s_%s" % (count2sid[id], tid)
            if cid in valid_keys:  # drop everything that is not in the original input
result.append([cid, token])
else:
print("WARNING: Dropping %s!", cid)
if id % 10000 == 0:
print("10000 sentences took %d ms. Acc %d" %((time.time()*1000 - now), acc))
acc =0;
now = time.time() * 1000
out_df = pd.DataFrame(data=result, columns=["id", "after"])
out_df.to_csv(output_file, encoding='utf-8', index=False)
def competition_input_to_sentences(inFile, resultingInFile):
# Processing 'in' file first - writing it into sentences.
inData = pd.read_csv(inFile, encoding='utf-8')
srcDataset = OrderedDict()
for (sid, tid, before) in inData[['sentence_id', 'token_id', 'before']].values:
if (not isinstance(before, str)):
before = str(before)
if sid not in srcDataset:
srcDataset[sid] = [];
wrap = True
if wrap: srcDataset[sid].append("<T%d>"%tid)
for key in before.split(" "):
srcDataset[sid].append(key)
if wrap: srcDataset[sid].append("</T%d>"%tid)
resIn = open(resultingInFile, "w+", encoding='utf-8')
for snt in srcDataset.values():
resIn.write("%s\n" % " ".join(snt))
resIn.close()
def post_process_translation(original_input, translated_csv, out_file, dict_file):
source_dataset = pd.read_csv(original_input, encoding='utf-8')
translation = pd.read_csv(translated_csv, encoding='utf-8')
original_data = {}
for (sid, tid, before) in source_dataset[['sentence_id', 'token_id', 'before']].values:
if (not isinstance(before, str)):
before = str(before)
if sid not in original_data:
original_data[sid] = {};
if tid not in original_data[sid]:
original_data[sid][tid] = before
else:
print("ERROR: In the source %d sid, %d tid is not uniq"%(sid,tid))
#print(original_data[1183][2])
translated_data = {}
for (id, after) in translation[['id', 'after']].values:
if (not isinstance(after, str)):
after = str(after)
sid,tid = id.split('_')
sid = int(sid)
tid = int(tid)
if sid not in translated_data:
translated_data[sid] = {};
if tid not in translated_data[sid]:
translated_data[sid][tid] = after
else:
print("ERROR: In transaltion %d sid, %d tid is not uniq" % (sid, tid))
if tid not in original_data[sid]:
print("WARNING: we have tid that is not in the source. tid: %d, sid:%d "%(sid,tid))
#Handle multiple consequent unks
for sid in translated_data.keys():
extra_for_current_sid = {}
for tid in translated_data[sid].keys():
if '<unk>' in translated_data[sid][tid]:
utokens=translated_data[sid][tid].split(' ')
for i in range(len(utokens)):
if (i == 0) or (tid + i not in translated_data[sid].keys()):
extra_for_current_sid[tid + i] = utokens[i]
else:
print("WARNING: Consistency issue with '<unk>', sid %d, tid %d" % (sid, tid+i))
for tid, val in extra_for_current_sid.items():
translated_data[sid][tid] = val
#End of multiple split
for sid in translated_data.keys():
tids_to_remove = []
for tid in translated_data[sid].keys():
if translated_data[sid][tid] == '<unk>':
if tid in original_data[sid].keys() :
translated_data[sid][tid] = original_data[sid][tid]
else:
tids_to_remove.append(tid)
for tid in tids_to_remove:
translated_data[sid].pop(tid, None) # remove the token
dictionary = load_dictionary(dict_file)
corrected = 0;
total = 0;
for sid in original_data.keys():
for tid in original_data[sid].keys():
total += 1;
before = original_data[sid][tid]
if before in dictionary.keys():
if tid in translated_data[sid]:
after = translated_data[sid][tid]
else:
after = "<dumm>"
dict_after = dictionary[before]
if not after == dict_after:
print("%s => Correcting \"%s\" to \"%s\""%(before, after, dict_after))
translated_data[sid][tid] = dict_after
corrected +=1
print("Corrected %d (%f percent) tokens from training dictionary"%(corrected, 100.0*corrected/total))
result = []
for sid in sorted(translated_data.keys()):
for tid in sorted(translated_data[sid].keys()):
cid = "%s_%s" % (sid, tid)
result.append([cid, translated_data[sid][tid]])
    outDF = pd.DataFrame(data=result, columns=["id", "after"])
    # write the post-processed submission to the requested output path
    outDF.to_csv(out_file, encoding='utf-8', index=False)
"""
Coding: UTF-8
Author: Randal
Time: 2021/2/20
E-mail: <EMAIL>
Description: This is a simple toolkit for extracting data from text.
Its core functionality is word-frequency statistics: using re, the counting process is
generalized so that it does not depend on any preset word segmentation. Several other
convenience functions, such as extracting the top sentences of a document, are also provided.
All rights reserved.
"""
import xlwings as xw
import pandas as pd
import numpy as np
import os
import re
from alive_progress import alive_bar
from alive_progress import show_bars, show_spinners
import jieba
import datetime
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import math
class jieba_vectorizer(CountVectorizer):
def __init__(self, tf, userdict, stopwords, orient=False):
"""
        :param tf: input sample DataFrame, {axis: 1, 0: id, 1: title, 2: body text, 3: source, 4: freq}
        :param stopwords: path to the stop-word list
        :param userdict: path to the keyword list (user dictionary)
        :param orient: {True: the returned DTM only keeps words from the keyword list, False: the DTM keeps all words}
        :return: a word-vector sample that can be used directly
"""
self.userdict = userdict
self.orient = orient
self.stopwords = stopwords
        jieba.load_userdict(self.userdict)  # load the user-defined keyword dictionary
        tf = tf.copy()  # avoid mutating the caller's original DataFrame
        print('Tokenizing, please wait...')
        rule = re.compile(u'[^\u4e00-\u9fa5]')  # clean every sample: keep Chinese characters only
for i in range(0, tf.shape[0]):
try:
tf.iloc[i, 2] = rule.sub('', tf.iloc[i, 2])
except TypeError:
                print('Sample cleaning error: doc_id = ' + str(i))
continue
if self.stopwords is not None:
            stopwords = txt_to_list(self.stopwords)  # load the stop-word list
else:
stopwords = []
        # start tokenizing
words = []
items = range(0, len(tf))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i, row in tf.iterrows():
item = row['正文']
result = jieba.cut(item)
                # filter out stop words at the same time
word = ''
for element in result:
if element not in stopwords:
if element != '\t':
word += element
word += " "
words.append(word)
bar()
        # CountVectorizer() does the term counting automatically; fit_transform builds the text vectors and the vocabulary
        # to switch to TfidfVectorizer, only the three lines below need to change
vect = CountVectorizer()
X = vect.fit_transform(words)
self.vectorizer = vect
matrix = X
X = X.toarray()
        # a 2-D ndarray can be inspected in PyCharm, but it behaves very differently from a DataFrame
        # an ndarray has no index and no columns
features = vect.get_feature_names()
XX = pd.DataFrame(X, index=tf['id'], columns=features)
self.DTM0 = matrix
self.DTM = XX
self.features = features
        # # the lines below are an earlier dead end, kept only for reference
        # words_bag = vect.vocabulary_
        # # transposing the dict (only valid when keys and values map one-to-one; for one value to many keys see setdefault)
        # bag_words = dict((v, k) for k, v in words_bag.items())
        #
        # # the ordering of dict elements is not the same as the ordering of the dict values
# lst = []
# for i in range(0, len(XX.columns)):
# lst.append(bag_words[i])
# XX.columns = lst
if orient:
dict_filter = txt_to_list(self.userdict)
for word in features:
if word not in dict_filter:
XX.drop([word], axis=1, inplace=True)
self.DTM_key = XX
def get_feature_names(self):
return self.features
def strip_non_keywords(self, df):
ff = df.copy()
dict_filter = txt_to_list(self.userdict)
for word in self.features:
if word not in dict_filter:
ff.drop([word], axis=1, inplace=True)
return ff
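# Illustrative usage sketch for jieba_vectorizer (added for clarity). The user-dictionary path is a
# placeholder, and the toy frame only mimics the required column layout
# {0: id, 1: title, 2: body text ('正文'), 3: source, 4: freq}:
def _demo_jieba_vectorizer():
    toy = pd.DataFrame({'id': [1, 2], '标题': ['t1', 't2'],
                        '正文': ['数字经济是新的增长引擎', '数字经济带来新的就业形态'],
                        '来源': ['a', 'b'], 'freq': [0, 0]})
    vec = jieba_vectorizer(toy, userdict='keywords.txt', stopwords=None, orient=False)
    print(vec.DTM.head())           # document-term matrix indexed by 'id'
    print(vec.get_feature_names())  # the extracted vocabulary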
def make_doc_freq(word, doc):
"""
    :param word: the keyword whose frequency is to be counted
    :param doc: the text to scan
    :return: a dict recording how often the keyword appears in the text, together with the surrounding context of each hit
"""
    # match with a regular expression built from the keyword
    # re.S lets '.' match newline characters as well
    # finditer is the iterator version of findall; iterating it yields the position of every match
it = re.finditer(word, doc, re.S)
    # match.group() returns the matched substring, match.span() returns its start/end indices
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
    # collect the surrounding contexts into a dict as well
context = dict()
for i in range(0, len(lst)):
        # extend the span by at most 10 characters on each side to get the context
try:
            # to keep the context window inside the document, clamp the indices:
            # compare span+10 against the document length and span-10 against 0,
            # taking the smaller value for the upper bound and the larger for the lower bound
MAX = min(lst[i][1] + 10, len(doc))
MIN = max(0, lst[i][0] - 10)
            # extract the context
context[str(i)] = doc[MIN: MAX]
except IndexError:
print('IndexError: ' + word)
freq['Context'] = context
return freq
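# Illustrative usage sketch (added for clarity; the keyword and sentences are made up):
def _demo_make_doc_freq():
    doc = '数字经济是新的增长引擎。发展数字经济带来新的就业形态。'
    freq = make_doc_freq('数字经济', doc)
    print(freq['Frequency'])  # -> 2
    print(freq['Context'])    # each hit with up to 10 characters of context on either side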
def make_info_freq(name, pattern, doc):
"""
    :param name: the label of the pattern (form) whose frequency is to be counted
    :param pattern: a tuple (regular expression, trimming method) used for the counting
    :param doc: the text to scan
    :return: a dict recording how often the pattern appears in the text, together with the context of each hit
    Note: the context entries of the returned dict are tuples of (matched keyword, context)
"""
    # match with the supplied regular expression
    # re.S lets '.' match newline characters as well
    # finditer is the iterator version of findall; iterating it yields the position of every match
it = re.finditer(pattern[0], doc, re.S)
    # match.group() returns the matched substring, match.span() returns its start/end indices
cls = pattern[1]
lst = []
for match in it:
lst.append(match.span())
freq = dict()
freq['Frequency'] = len(lst)
freq['Name'] = name
    # collect the surrounding contexts into a dict as well
context = dict()
for i in range(0, len(lst)):
        # extend the span by at most 10 characters on each side to get the context
try:
            # to keep the context window inside the document, clamp the indices:
            # compare span+10 against the document length and span-10 against 0,
            # taking the smaller value for the upper bound and the larger for the lower bound
MAX = min(lst[i][1] + 10, len(doc))
MIN = max(0, lst[i][0] - 10)
            # take the matched keyword and trim it with match_cut
word = match_cut(doc[lst[i][0]: lst[i][1]], cls)
            # pack the keyword and its context into the context entry
context[str(i)] = (word, doc[MIN: MAX])
except IndexError:
print('IndexError: ' + name)
freq['Context'] = context
return freq
def make_docs_freq(word, docs):
"""
    :param word: the keyword whose frequency is to be counted
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0), a body-text column (iloc: 2) and a pre-allocated frequency column (iloc: 4)
    :return: a dict containing the per-document ("single keyword - single text") frequency dicts, plus the aggregated counts
"""
freq = dict()
    # the total frequency is accumulated with '+=' rather than assigned, so it must start at 0
freq['Total Frequency'] = 0
    docs = docs.copy()  # avoid mutating the caller's original DataFrame
for i in range(0, len(docs)):
        # build a dict for every document holding the keyword's frequency and contexts in that document
        # the id must be in column 0 and the body text in column 2
freq['Doc' + str(docs.iloc[i, 0])] = make_doc_freq(word, docs.iloc[i, 2])
        # while building each document's dict, keep a running total of the overall frequency
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
    # next, build a DFC (doc-freq-context) panel that aggregates the frequencies and contexts of all documents
    # first construct the (id, freq) mapping
xs = docs['id']
ys = docs['freq']
    # zip() over the two columns gives the mapping directly
id_freq = {x: y for x, y in zip(xs, ys)}
    # create an empty shell DataFrame and paste the records in one by one
data = pd.DataFrame(columns=['id', 'freq', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
context = doc['Context']
for i in range(0, num):
strip = {'id': item, 'freq': id_freq[item], 'word': word, 'num': i, 'context': context[str(i)]}
            # the default orient is 'columns'
            # when the dict values are scalars, an index must be supplied
strip = pd.DataFrame(strip, index=[None])
            # DataFrame.append does not modify in place, so reassign the result
data = data.append(strip)
data.set_index(['id', 'freq', 'word'], drop=True, inplace=True)
freq['DFC'] = data
return freq
def make_infos_freq(name, pattern, docs):
"""
    :param name: the label of the pattern (form) whose frequency is to be counted
    :param pattern: a tuple (regular expression, trimming method) used for the counting
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column (iloc: 0) and a body-text column (iloc: 2)
    :return: a dict containing the per-document ("single keyword - single text") frequency dicts, plus the aggregated counts
"""
freq = dict()
    # the total frequency is accumulated with '+=' rather than assigned, so it must start at 0
freq['Total Frequency'] = 0
    docs = docs.copy()  # avoid mutating the caller's original DataFrame
items = range(0, len(docs))
with alive_bar(len(items), force_tty=True, bar='circles') as bar:
for i in items:
            # build a dict for every document holding the pattern's frequency and contexts in that document
            # the id must be in column 0 and the body text in column 2
            # pass the full pattern tuple through, because make_info_freq needs both of its elements
freq['Doc' + str(docs.iloc[i, 0])] = make_info_freq(name, pattern, docs.iloc[i, 2])
            # while building each document's dict, keep a running total of the overall frequency
freq['Total Frequency'] += freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
docs.iloc[i, 4] = freq['Doc' + str(docs.iloc[i, 0])]['Frequency']
bar()
    # next, build a DFC (doc-freq-context) panel that aggregates the frequencies and contexts of all documents
    # first construct the (id, freq) mapping
xs = docs['id']
ys = docs['freq']
    # zip() over the two columns gives the mapping directly
id_freq = {x: y for x, y in zip(xs, ys)}
    # create an empty shell DataFrame and paste the records in one by one
data = pd.DataFrame(columns=['id', 'freq', 'form', 'word', 'num', 'context'])
for item in xs:
doc = freq['Doc' + str(item)]
num = doc['Frequency']
        # unpack the (keyword, context) tuple
context = doc['Context']
for i in range(0, num):
            # the keyword inside context has already been trimmed by match_cut, no need to repeat it
strip = {'id': item, 'form': name, 'freq': id_freq[item], 'word': context[str(i)][0],
'num': i, 'context': context[str(i)][1]}
            # the default orient is 'columns'
            # when the dict values are scalars, an index must be supplied
strip = pd.DataFrame(strip, index=[None])
            # DataFrame.append does not modify in place, so reassign the result
data = data.append(strip)
data.set_index(['id', 'freq', 'form', 'word'], drop=True, inplace=True)
freq['DFC'] = data
print(name + ' Completed')
return freq
def words_docs_freq(words, docs):
"""
    :param words: the list of keywords to run the frequency statistics on
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column, a body-text column and a frequency column
    :return: a dict containing the per-keyword ("single keyword - many texts") frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
    # also create an empty shell DataFrame used to aggregate the DFC
data = pd.DataFrame()
    # and an empty shell used to aggregate the DTM (Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=words, index=docs['id'])
    # one loop handles everything
items = range(len(words))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for word in words:
freq = make_docs_freq(word, docs)
freqs[word] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, word] = freq['Doc' + str(item)]['Frequency']
bar()
    # remember to sort, otherwise the ordering is wrong (it should be ordered by doc id)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
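# Illustrative usage sketch (added for clarity). The toy frame below only mimics the expected
# column layout -- id in column 0, body text ('正文') in column 2, a pre-allocated freq column in
# column 4 -- and the call relies on a pandas version that still provides DataFrame.append:
def _demo_words_docs_freq():
    docs = pd.DataFrame({'id': [1, 2], '标题': ['t1', 't2'],
                         '正文': ['数字经济是新的增长引擎。', '数字经济带来新的就业形态。'],
                         '来源': ['a', 'b'], 'freq': [0, 0]})
    res = words_docs_freq(['数字经济'], docs)
    print(res['DTM'])  # per-document count of every keyword
    print(res['DFC'])  # one row per hit, indexed by (id, freq, word), with its context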
def infos_docs_freq(infos, docs):
"""
    :param docs: the collection of texts to scan; must be a pandas DataFrame with at least an id column and a body-text column
    :param infos: a dict of regular expressions, where the key is an example such as "(1)" and the value is the pattern, e.g. "([0-9])"
    :return: a dict containing the per-pattern ("single keyword - many texts") frequency dicts, plus the final DFC (doc-frequency-context) and DTM (doc-term matrix)
"""
freqs = dict()
    # also create an empty shell DataFrame used to aggregate the DFC
data = pd.DataFrame()
    # and an empty shell used to aggregate the DTM (Doc-Term-Matrix)
dtm = pd.DataFrame(None, columns=list(infos.keys()), index=docs['id'])
    # one loop handles everything
items = range(len(infos))
with alive_bar(len(items), force_tty=True, bar='blocks') as bar:
for k, v in infos.items():
freq = make_infos_freq(k, v, docs)
freqs[k] = freq
data = data.append(freq['DFC'])
for item in docs['id']:
dtm.loc[item, k] = freq['Doc' + str(item)]['Frequency']
bar()
    # remember to sort, otherwise the ordering is wrong (it should be ordered by doc id)
data.sort_index(inplace=True)
freqs['DFC'] = data
freqs['DTM'] = dtm
return freqs
def massive_pop(infos, doc):
"""
    :param infos: List of regular expressions matching the content to be removed
    :param doc: the body text
    :return: the text with all matches removed
"""
for info in infos:
doc = re.sub(info, '', doc)
return doc
def massive_sub(infos, doc):
"""
    :param infos: Dict mapping the regular expression of the content to be replaced to its replacement
    :param doc: the body text
    :return: the text with all substitutions applied
"""
    for v, k in infos.items():
doc = re.sub(v, k, doc)
return doc
# next, take the first n sentences of each sample (or everything there is, if the text is shorter) for another round of comparison
# "first ten sentences" works by counting sentence-ending punctuation such as 。?!, stopping at the tenth
def top_n_sent(n, doc, percentile=1):
"""
    :param n: the number of sentences; the function returns the first n sentences of a text, or the whole text if it has fewer than n sentences
    :param doc: the body text
    :param percentile: when selecting sentences by percentile rather than a fixed n, the quantile to use;
                       e.g. with ten sentences, the 50% percentile yields 5 sentences (the count is computed with math.ceil)
    :return: a string containing the first n sentences
"""
info = '[。?!]'
    # inside this function the main body's scope encloses the loop body, so loop variables behave like locals;
    # returning them directly outside the loop would raise a NameError unless they were declared global,
    # which is not recommended: a same-named variable outside the function would get clobbered
    # iterators are the better tool here -- wrap the loop in an iterator and most of these problems go away
    # re already ships such an iterator (re.finditer), so there is no need to write one
    # the first line below wraps the iterator in a list, giving a list of match objects
    # each match object carries its own attributes
re_iter = list(re.finditer(info, doc))
    # max_iter is the number of sentence-ending matches found by re
    max_iter = len(re_iter)
    # if the text is very short or contains no sentence-ending punctuation, return it whole
    if max_iter == 0:
        return doc
    # when a percentile is given, discard the original n and use that share of the total sentence count instead
    # (note: the count is computed with math.ceil, i.e. rounded up)
    if percentile != 1:
        n = math.ceil(percentile * max_iter)
    # if the text is too short, or the percentile too low to make up even one sentence, return the first sentence
    if n == 0:
        return doc[0: re_iter[0].end()]
    # if at least n sentences were matched, return only the first n
    if max_iter >= n:
        return doc[0: re_iter[n - 1].end()]
    # if fewer than n sentences were matched, return everything up to the last match
    elif 0 < max_iter < n:
        return doc[0: re_iter[-1].end()]
# to reduce the chance of name collisions, keep the number of variables used inside function bodies small
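# Illustrative usage sketch for top_n_sent (added for clarity; the sentences are made up):
def _demo_top_n_sent():
    doc = '第一句话。第二句话!第三句话?第四句话。'
    print(top_n_sent(2, doc))                    # -> '第一句话。第二句话!'
    print(top_n_sent(10, doc, percentile=0.5))   # half of the 4 sentences -> the first 2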
def dtm_sort_filter(dtm, keymap, name=None):
"""
    :param dtm: the Doc-Term-Matrix of word counts produced earlier
    :param keymap: a dict mapping each category to its list of keywords
    :param name: the name of the Excel file to produce (including the extension)
    :return: a dict with two pandas.DataFrames: a binary table showing whether each category is present, and the final number of categories per document
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
dtm_class = pd.DataFrame.from_dict(strips, orient='index')
dtm_class = dtm_class.applymap(lambda x: 1 if x != 0 else 0)
dtm_final = dtm_class.agg(np.sum, axis=1)
result = {'DTM_class': dtm_class, 'DTM_final': dtm_final}
return result
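# Illustrative usage sketch (added for clarity; the DTM and keymap below are toy values):
def _demo_dtm_sort_filter():
    dtm = pd.DataFrame({'数字经济': [2, 0], '新基建': [1, 0], '违约': [0, 3]}, index=[1, 2])
    keymap = {'policy': ['数字经济', '新基建'], 'risk': ['违约']}
    res = dtm_sort_filter(dtm, keymap)
    print(res['DTM_class'])  # binary table: which categories appear in each document
    print(res['DTM_final'])  # number of distinct categories per document (1 for both docs here)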
def dtm_point_giver(dtm, keymap, scoremap, name=None):
"""
    :param dtm: the Doc-Term-Matrix of word counts produced earlier
    :param keymap: a dict of the form {TypeA: [word1, word2, word3, ...], TypeB: ...}
    :param scoremap: a dict mapping each category to its score
    :param name: the name of the Excel file to produce (including the extension)
    :return: a pandas.DataFrame with two columns: the document id and the document's score (the highest score over all matched keywords)
"""
dtm = dtm.applymap(lambda x: 1 if x != 0 else 0)
    # words that are not in the keymap are filtered out
strips = {}
for i, row in dtm.iterrows():
strip = {}
for k, v in keymap.items():
strip[k] = 0
for item in v:
try:
strip[k] += row[item]
except KeyError:
pass
strips[i] = strip
    dtm_class = pd.DataFrame.from_dict(strips, orient='index')
import pandas as pd
location="measure1.csv"
e = pd.read_csv(location)
#Imports
import os, sys
import glob
import time, sched
from datetime import datetime
import numpy as np
import pandas as pd
import socket
import psycopg2
import subprocess
import pytz
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from bokeh.io import curdoc # , output_file, save
from bokeh.models import (TextInput, ColumnDataSource, DateFormatter, Paragraph, Button, TextAreaInput, Select,CheckboxGroup, RadioButtonGroup)
from bokeh.models.widgets.markups import Div
from bokeh.layouts import layout, column, row
from bokeh.models.widgets import Panel, Tabs, FileInput
from bokeh.models.widgets.tables import DataTable, TableColumn
from bokeh.plotting import figure
from astropy.time import TimezoneInfo
import astropy.units.si as u
import ephem
from util import sky_calendar
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
sys.path.append(os.getcwd())
sys.path.append('./ECLAPI-8.0.12/lib')
import nightlog as nl
class Report():
def __init__(self, type):
self.test = False
self.report_type = type
self.utc = TimezoneInfo()
self.kp_zone = TimezoneInfo(utc_offset=-7*u.hour)
self.zones = [self.utc, self.kp_zone]
self.datefmt = DateFormatter(format="%m/%d/%Y %H:%M:%S")
self.inst_style = {'font-size':'150%'}
self.subt_style = {'font-size':'200%','font-style':'bold'}
self.title_style = {'font-size':'250%','font-style':'bold'}
self.alert_style = {'font-size':'150%','color':'red'}
self.nl_file = None
self.intro_subtitle = Div(text="Connect to Night Log",css_classes=['subt-style'])
self.time_note = Div(text="<b> Note: </b> Enter all times as HHMM (1818 = 18:18 = 6:18pm) in Kitt Peak local time. Either enter the time or hit the <b> Now </b> button if it just occured.", css_classes=['inst-style'])
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
if 'desi' in hostname:
self.location = 'kpno'
#self.conn = psycopg2.connect(host="desi-db", port="5442", database="desi_dev", user="desi_reader", password="<PASSWORD>")
elif 'app' in hostname: #this is not true. Needs to change.
self.location = 'nersc'
else:
self.location = 'other'
nw_dirs = {'nersc':'/global/cfs/cdirs/desi/spectro/nightwatch/nersc/', 'kpno':'/exposures/nightwatch/', 'other':None}
self.nw_dir = nw_dirs[self.location]
self.nl_dir = os.environ['NL_DIR']
self.your_name = TextInput(title ='Your Name', placeholder = '<NAME>')
self.os_name_1 = TextInput(title ='Observing Scientist 1', placeholder = '<NAME>')
self.os_name_2 = TextInput(title ='Observing Scientist 2', placeholder = "<NAME>")
self.lo_names = ['None ','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','Other ']
self.oa_names = ['None ','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','Other ']
self.intro_txt = Div(text=' ')
self.comment_txt = Div(text=" ", css_classes=['inst-style'], width=1000)
self.date_init = Select(title="Existing Night Logs")
self.time_title = Paragraph(text='Time* (Kitt Peak local time)', align='center')
self.now_btn = Button(label='Now', css_classes=['now_button'], width=50)
days = [d for d in os.listdir(self.nl_dir) if os.path.isdir(os.path.join(self.nl_dir, d))]
init_nl_list = np.sort([day for day in days if 'nightlog_meta.json' in os.listdir(os.path.join(self.nl_dir,day))])[::-1][0:10]
self.date_init.options = list(init_nl_list)
self.date_init.value = init_nl_list[0]
self.connect_txt = Div(text=' ', css_classes=['alert-style'])
self.connect_bt = Button(label="Connect to Existing Night Log", css_classes=['connect_button'])
self.exp_info = Div(text="Fill In Only Relevant Data. Mandatory fields have an asterisk*.", css_classes=['inst-style'],width=500)
self.exp_comment = TextAreaInput(title ='Comment/Remark', placeholder = 'Humidity high for calibration lamps',value=None,rows=10, cols=5,width=800,max_length=5000)
self.exp_time = TextInput(placeholder = '20:07',value=None, width=100) #title ='Time in Kitt Peak local time*',
self.exp_btn = Button(label='Add', css_classes=['add_button'])
self.exp_type = Select(title="Exposure Type", value = None, options=['None','Zero','Focus','Dark','Arc','FVC','DESI'])
self.exp_alert = Div(text=' ', css_classes=['alert-style'])
self.exp_exposure_start = TextInput(title='Exposure Number: First', placeholder='12345', value=None)
self.exp_exposure_finish = TextInput(title='Exposure Number: Last', placeholder='12346', value=None)
self.nl_subtitle = Div(text="Current DESI Night Log: {}".format(self.nl_file), css_classes=['subt-style'])
self.nl_btn = Button(label='Get Current DESI Night Log', css_classes=['connect_button'])
self.nl_text = Div(text=" ", css_classes=['inst-style'], width=1000)
self.nl_alert = Div(text='You must be connected to a Night Log', css_classes=['alert-style'], width=500)
self.nl_info = Div(text="Night Log Info:", css_classes=['inst-style'], width=500)
self.exptable_alert = Div(text=" ",css_classes=['alert-style'], width=500)
self.checklist = CheckboxGroup(labels=[])
self.check_time = TextInput(placeholder = '20:07', value=None) #title ='Time in Kitt Peak local time*',
self.check_alert = Div(text=" ", css_classes=['alert-style'])
self.check_btn = Button(label='Submit', css_classes=['add_button'])
self.check_comment = TextAreaInput(title='Comment', placeholder='comment if necessary', rows=3, cols=3)
self.prob_subtitle = Div(text="Problems", css_classes=['subt-style'])
self.prob_inst = Div(text="Describe problems as they come up and at what time they occur. If there is an Alarm ID associated with the problem, include it, but leave blank if not. If possible, include a description of the remedy.", css_classes=['inst-style'], width=1000)
self.prob_time = TextInput(placeholder = '20:07', value=None, width=100) #title ='Time in Kitt Peak local time*',
self.prob_input = TextAreaInput(placeholder="NightWatch not plotting raw data", rows=10, cols=3, title="Problem Description*:")
self.prob_alarm = TextInput(title='Alarm ID', placeholder='12', value=None, width=100)
self.prob_action = TextAreaInput(title='Resolution/Action',placeholder='description',rows=10, cols=3)
self.prob_btn = Button(label='Add', css_classes=['add_button'])
self.prob_alert = Div(text=' ', css_classes=['alert-style'])
self.img_subtitle = Div(text="Images", css_classes=['subt-style'])
self.img_upinst = Div(text="Include images in the Night Log by uploading a png image from your local computer. Select file, write a comment and click Add", css_classes=['inst-style'], width=1000)
self.img_upinst2 = Div(text=" Choose image to include with comment: ", css_classes=['inst-style'])
self.img_upload = FileInput(accept=".png")
self.img_upload.on_change('value', self.upload_image)
self.img_upload_comments = FileInput(accept=".png")
self.img_upload_comments.on_change('value', self.upload_image_comments)
self.img_upload_comments_os = FileInput(accept=".png")
self.img_upload_comments_os.on_change('value', self.upload_image_comments_os)
self.img_upload_problems = FileInput(accept=".png")
self.img_upload_problems.on_change('value', self.upload_image_problems)
self.img_inst = Div(text="Include images in the Night Log by entering the location of the images on the desi cluster", css_classes=['inst-style'], width=1000)
self.img_input = TextInput(title='image file location on desi cluster', placeholder='/n/home/desiobserver/image.png',value=None)
self.img_comment = TextAreaInput(placeholder='comment about image', rows=8, cols=3, title='Image caption')
self.img_btn = Button(label='Add', css_classes=['add_button'])
self.img_alert = Div(text=" ",width=1000)
self.plot_subtitle = Div(text="Telemetry Plots", css_classes=['subt-style'])
self.DESI_Log = None
self.save_telem_plots = False
def clear_input(self, items):
"""
After submitting something to the log, this will clear the form.
"""
if isinstance(items, list):
for item in items:
item.value = None
else:
items.value = None
def get_intro_layout(self):
intro_layout = layout([self.title,
[self.page_logo, self.instructions],
self.intro_subtitle,
[self.date_init, self.your_name],
[self.connect_bt],
self.connect_txt,
self.nl_info,
self.intro_txt], width=1000)
self.intro_tab = Panel(child=intro_layout, title="Initialization")
def get_checklist_layout(self):
checklist_layout = layout(self.title,
self.check_subtitle,
self.checklist_inst,
self.checklist,
self.check_comment,
[self.check_btn],
self.check_alert, width=1000)
self.check_tab = Panel(child=checklist_layout, title="DQS Checklist")
def get_prob_layout(self):
prob_layout = layout([self.title,
self.prob_subtitle,
self.prob_inst,
self.time_note,
                              [self.time_title, self.prob_time, self.now_btn, self.img_upinst2, self.img_upload_problems],
self.prob_alarm,
[self.prob_input, self.prob_action],
[self.prob_btn],
self.prob_alert], width=1000)
self.prob_tab = Panel(child=prob_layout, title="Problems")
def get_plots_layout(self):
telem_data = pd.DataFrame(columns = ['tel_time','tower_time','exp_time','exp','mirror_temp','truss_temp','air_temp','humidity','wind_speed','airmass','exptime','seeing'])
self.telem_source = ColumnDataSource(telem_data)
plot_tools = 'pan,wheel_zoom,lasso_select,reset,undo,save'
p1 = figure(plot_width=800, plot_height=300, x_axis_label='UTC Time', y_axis_label='Temp (C)',x_axis_type="datetime", tools=plot_tools)
p2 = figure(plot_width=800, plot_height=300, x_axis_label='UTC Time', y_axis_label='Humidity (%)', x_axis_type="datetime",tools=plot_tools)
p3 = figure(plot_width=800, plot_height=300, x_axis_label='UTC Time', y_axis_label='Wind Speed (mph)', x_axis_type="datetime",tools=plot_tools)
p4 = figure(plot_width=800, plot_height=300, x_axis_label='UTC Time', y_axis_label='Airmass', x_axis_type="datetime",tools=plot_tools)
p5 = figure(plot_width=800, plot_height=300, x_axis_label='UTC Time', y_axis_label='Exptime (sec)', x_axis_type="datetime",tools=plot_tools)
p6 = figure(plot_width=800, plot_height=300, x_axis_label='Exposure', y_axis_label='Seeing (arcsec)', tools=plot_tools)
p1.circle(x = 'tel_time',y='mirror_temp',source=self.telem_source,color='orange', legend_label = 'Mirror', size=10, alpha=0.5)
p1.circle(x = 'tel_time',y='truss_temp',source=self.telem_source, legend_label = 'Truss', size=10, alpha=0.5)
p1.circle(x = 'tel_time',y='air_temp',source=self.telem_source, color='green',legend_label = 'Air', size=10, alpha=0.5)
p1.legend.location = "top_right"
p2.circle(x = 'tower_time',y='humidity',source=self.telem_source, size=10, alpha=0.5)
p3.circle(x = 'tower_time',y='wind_speed',source=self.telem_source, size=10, alpha=0.5)
p4.circle(x = 'exp_time',y='airmass',source=self.telem_source, size=10, alpha=0.5)
p5.circle(x = 'exp_time',y='exptime',source=self.telem_source, size=10, alpha=0.5)
p6.circle(x = 'exp',y='seeing',source=self.telem_source, size=10, alpha=0.5)
plot_layout = layout([self.title,
self.plot_subtitle,
p6,p1,p2,p3,p4,p5], width=1000)
self.plot_tab = Panel(child=plot_layout, title="Telemetry Plots")
def get_nl_layout(self):
exp_data = pd.DataFrame(columns = ['date_obs','id','program','sequence','flavor','exptime'])
self.explist_source = ColumnDataSource(exp_data)
columns = [TableColumn(field='date_obs', title='Time (UTC)', width=50, formatter=self.datefmt),
TableColumn(field='id', title='Exposure', width=50),
TableColumn(field='sequence', title='Sequence', width=100),
TableColumn(field='flavor', title='Flavor', width=50),
TableColumn(field='exptime', title='Exptime', width=50),
TableColumn(field='program', title='Program', width=300)]
self.exp_table = DataTable(source=self.explist_source, columns=columns, width=1000)
nl_layout = layout([self.title,
self.nl_subtitle,
self.nl_alert,
self.nl_text,
self.exptable_alert,
self.exp_table], width=1000)
self.nl_tab = Panel(child=nl_layout, title="Current DESI Night Log")
def get_img_layout(self):
img_layout = layout([self.title,
self.img_subtitle,
self.img_upinst,
self.img_upload,
self.img_inst,
self.img_input,
self.img_comment,
self.img_btn,
self.img_alert], width=1000)
self.img_tab = Panel(child=img_layout, title='Images')
def short_time(self, str_time):
"""Returns %H%M in whichever time zone selected
"""
try:
t = datetime.strptime(str_time, "%Y%m%dT%H:%M")
zone = self.kp_zone #zones[time_select.active]
time = datetime(t.year, t.month, t.day, t.hour, t.minute, tzinfo = zone)
return "{}:{}".format(str(time.hour).zfill(2), str(time.minute).zfill(2))
except:
return str_time
def get_time(self, time):
"""Returns strptime with utc. Takes time zone selection
"""
date = self.date_init.value
zone = self.kp_zone #zones[time_select.active]
try:
t = datetime.strptime(date+":"+time,'%Y%m%d:%H%M')
except:
try:
t = datetime.strptime(date+":"+time,'%Y%m%d:%I:%M%p')
except:
try:
t = datetime.strptime(date+":"+time,'%Y%m%d:%H:%M')
except:
pass #print("need format %H%M, %H:%M, %H:%M%p")
try:
tt = datetime(t.year, t.month, t.day, t.hour, t.minute, tzinfo = zone)
return tt.strftime("%Y%m%dT%H:%M")
except:
return time
def get_strftime(self, time):
date = self.date_init.value
year, month, day = int(date[0:4]), int(date[4:6]), int(date[6:8])
d = datetime(year, month, day)
dt = datetime.combine(d,time)
return dt.strftime("%Y%m%dT%H:%M")
def connect_log(self):
"""
Initialize Night Log with Input Date
"""
try:
date = datetime.strptime(self.date_init.value, '%Y%m%d')
except:
date = datetime.now()
self.night = str(date.year)+str(date.month).zfill(2)+str(date.day).zfill(2)
self.DESI_Log=nl.NightLog(str(date.year),str(date.month).zfill(2),str(date.day).zfill(2))
exists = self.DESI_Log.check_exists()
your_firstname, your_lastname = self.your_name.value.split(' ')[0], ' '.join(self.your_name.value.split(' ')[1:])
if exists:
self.connect_txt.text = 'Connected to Night Log for {}'.format(self.date_init.value)
meta_dict = self.DESI_Log.get_meta_data()
if self.report_type == 'DQS':
self.DESI_Log.add_dqs_observer(your_firstname, your_lastname)
self.your_name.value = meta_dict['{}_1'.format(self.report_type.lower())]+' '+meta_dict['{}_last'.format(self.report_type.lower())]
elif self.report_type == 'OS':
self.os_name_1.value = meta_dict['{}_1_first'.format(self.report_type.lower())]+' '+meta_dict['{}_1_last'.format(self.report_type.lower())]
self.os_name_2.value = meta_dict['{}_2_first'.format(self.report_type.lower())]+' '+meta_dict['{}_2_last'.format(self.report_type.lower())]
self.current_header()
#if self.location == 'nersc':
self.nl_file = os.path.join(self.DESI_Log.root_dir,'nightlog.html')
# else:
# self.nl_file = os.getcwd()+'/'+self.DESI_Log.root_dir+'nightlog.html'
self.nl_subtitle.text = "Current DESI Night Log: {}".format(self.nl_file)
if self.report_type == 'OS':
plan_txt_text="https://desi.lbl.gov/trac/wiki/DESIOperations/ObservingPlans/OpsPlan{}{}{}".format(date.year,str(date.month).zfill(2),str(date.day).zfill(2))
self.plan_txt.text = '<a href={}>Tonights Plan Here</a>'.format(plan_txt_text)
self.LO.value = meta_dict['os_lo_1']+' '+meta_dict['os_lo_last']
self.OA.value = meta_dict['os_oa_1']+' '+meta_dict['os_oa_last']
try:
                    new_data = pd.read_csv(self.DESI_Log.weather_file)
                    new_data = new_data[['time','desc','temp','wind','humidity']]
                    self.weather_source.data = new_data
except:
pass
if os.path.exists(self.DESI_Log.contributer_file):
cont_txt = ''
f = open(self.DESI_Log.contributer_file, "r")
for line in f:
cont_txt += line
self.contributer_list.value = cont_txt
if os.path.exists(self.DESI_Log.weather_file):
                    data = pd.read_csv(self.DESI_Log.weather_file)
# import sys
# sys.path.append('JEMIPYC')
# from array_check_function_global import df,dfn,dfv,dfx,dfnx,dfvx
import pandas as pd
import numpy as np
tab = '__'
# non-extended display: the number of parameters is not limited -- pass 2, 3, or as many as you like.
# e.g. df(A, B, C, D, ..., Z, ...)
# of course, you can also pass just one parameter.
def df(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
blank = pd.DataFrame(blank,columns=[tab])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = xx
else:
df_concat = pd.concat([df_concat,blank,xx], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
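# Illustrative usage sketch (added for clarity): df() shows several arrays side by side, separated
# by a blank spacer column. It relies on IPython's display(), so a Jupyter/IPython session is assumed.
def _demo_df():
    a = np.arange(6).reshape(3, 2)
    b = np.arange(100, 112).reshape(3, 4)  # same number of rows as `a`
    df(a, b)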
def dfn(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
tabn = '{'+str(i+1)+'}'
blank = pd.DataFrame(blank,columns=[tabn])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = pd.concat([xx,blank], axis=1)
else:
df_concat = pd.concat([df_concat,xx,blank], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfv(*x):
pd.reset_option('display.max_columns')
pd.reset_option('display.max_rows')
leng = len(x)
df_concat = []
for i in range(leng):
xs = x[i]
row=len(x[0])
blank = ['']*row
if((i+1)!=leng):
# print(i)
vname = x[-1][i]
# print(vname)
tabv = "<("+str(vname)+")"
blank = pd.DataFrame(blank,columns=[tabv])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = pd.concat([xx,blank], axis=1)
else:
df_concat = pd.concat([df_concat,xx,blank], axis=1)
# print(df_concat)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
# extension
def dfx(*x):
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
blank = pd.DataFrame(blank,columns=[tab])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = xx
else:
df_concat = pd.concat([df_concat,blank,xx], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfnx(*x):
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
leng = len(x)
df_concat = []
for i in range(leng):
row=len(x[0])
blank = ['']*row
tabn = '{'+str(i+1)+'}'
blank = pd.DataFrame(blank,columns=[tabn])
xx = pd.DataFrame(x[i])
if(i==0):
df_concat = pd.concat([xx,blank], axis=1)
else:
df_concat = pd.concat([df_concat,xx,blank], axis=1)
df_concat.replace(np.nan, '', inplace=True)
display(df_concat)
def dfvx(*x):
pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
#!/usr/bin/env python
# coding: utf-8
# # Previous Applications
# ## About the data
# <blockquote>previous_application: This dataset has details of previous applications made by clients to Home Credit. Only clients who also exist in the <i>application</i> data appear here. Each current loan in the <i>application</i> data (identified by <i>SK_ID_CURR</i>) can have multiple previous loan applications. Each previous application occupies one row and is identified by the feature <i>SK_ID_PREV</i>.</blockquote>
#
# ## Feature Explanations
# <blockquote><p style="font-size:13px">
# SK_ID_PREV : ID of previous credit in Home credit related to loan in our sample. (One loan in our sample can have 0,1,2 or more previous loan applications in Home Credit, previous application could, but not necessarily have to lead to credit) <br>
# SK_ID_CURR: ID of loan in our sample<br>
# NAME_CONTRACT_TYPE: Contract product type (Cash loan, consumer loan [POS] ,...) of the previous application<br>
# AMT_ANNUITY: Annuity of previous application<br>
# AMT_APPLICATION: For how much credit did client ask on the previous application<br>
# AMT_CREDIT: Final credit amount on the previous application. This differs from AMT_APPLICATION in a way that the AMT_APPLICATION is the amount for which the client initially applied for, but during our approval process he could have received different amount - AMT_CREDIT<br>
# AMT_DOWN_PAYMENT: Down payment on the previous application<br>
# AMT_GOODS_PRICE: Goods price of good that client asked for (if applicable) on the previous application<br>
# WEEKDAY_APPR_PROCESS_START: On which day of the week did the client apply for previous application<br>
# HOUR_APPR_PROCESS_START: Approximately at what day hour did the client apply for the previous application<br>
# FLAG_LAST_APPL_PER_CONTRACT: Flag if it was last application for the previous contract. Sometimes by mistake of client or our clerk there could be more applications for one single contract<br>
# NFLAG_LAST_APPL_IN_DAY: Flag if the application was the last application per day of the client. Sometimes clients apply for more applications a day. Rarely it could also be error in our system that one application is in the database twice<br>
# NFLAG_MICRO_CASH: Flag Micro finance loan<br>
# RATE_DOWN_PAYMENT: Down payment rate normalized on previous credit<br>
# RATE_INTEREST_PRIMARY: Interest rate normalized on previous credit<br>
# RATE_INTEREST_PRIVILEGED: Interest rate normalized on previous credit<br>
# NAME_CASH_LOAN_PURPOSE: Purpose of the cash loan<br>
# NAME_CONTRACT_STATUS: Contract status (approved, cancelled, ...) of previous application<br>
# DAYS_DECISION: Relative to current application when was the decision about previous application made<br>
# NAME_PAYMENT_TYPE: Payment method that client chose to pay for the previous application<br>
# CODE_REJECT_REASON: Why was the previous application rejected<br>
# NAME_TYPE_SUITE: Who accompanied client when applying for the previous application<br>
# NAME_CLIENT_TYPE: Was the client old or new client when applying for the previous application<br>
# NAME_GOODS_CATEGORY: What kind of goods did the client apply for in the previous application<br>
# NAME_PORTFOLIO: Was the previous application for CASH, POS, CAR, …<br>
# NAME_PRODUCT_TYPE: Was the previous application x-sell o walk-in<br>
# CHANNEL_TYPE: Through which channel we acquired the client on the previous application<br>
# SELLERPLACE_AREA: Selling area of seller place of the previous application<br>
# NAME_SELLER_INDUSTRY: The industry of the seller<br>
# CNT_PAYMENT: Term of previous credit at application of the previous application<br>
# NAME_YIELD_GROUP: Grouped interest rate into small medium and high of the previous application<br>
# PRODUCT_COMBINATION: Detailed product combination of the previous application<br>
# DAYS_FIRST_DRAWING: Relative to application date of current application when was the first disbursement of the previous application<br>
# DAYS_FIRST_DUE: Relative to application date of current application when was the first due supposed to be of the previous application<br>
# DAYS_LAST_DUE_1ST_VERSION: Relative to application date of current application when was the first due of the previous application<br>
# DAYS_LAST_DUE: Relative to application date of current application when was the last due date of the previous application<br>
# DAYS_TERMINATION: Relative to application date of current application when was the expected termination of the previous application<br>
# NFLAG_INSURED_ON_APPROVAL: Did the client requested insurance during the previous application<br> </p></blockquote>
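# The one-to-many link described above (one <i>SK_ID_CURR</i>, many <i>SK_ID_PREV</i>) is what the
# aggregations later in this notebook exploit. A minimal sketch of the idea on a made-up toy frame
# (column names follow the real dataset, all values are illustrative):
#
#     toy = pd.DataFrame({'SK_ID_CURR': [1, 1, 2],
#                         'SK_ID_PREV': [10, 11, 12],
#                         'AMT_CREDIT': [5000.0, 7500.0, 3000.0]})
#     agg = toy.groupby('SK_ID_CURR').agg(
#         PREV_COUNT=('SK_ID_PREV', 'count'),
#         PREV_AMT_CREDIT_MEAN=('AMT_CREDIT', 'mean'))
#     # -> one row per current loan, summarising all of its previous applications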
# In[1]:
# Last amended: 24th October, 2020
# Myfolder: C:\Users\Administrator\OneDrive\Documents\home_credit_default_risk
# Objective:
# Solving Kaggle problem: Home Credit Default Risk
# Processing previous_application dataset
#
# Data Source: https://www.kaggle.com/c/home-credit-default-risk/data
# Ref: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features
# In[39]:
# 1.0 Libraries
# (Some of these may not be needed here.)
get_ipython().run_line_magic('reset', '-f')
import numpy as np
import pandas as pd
import gc
# 1.1 Reduce read data size
# There is a file reducing.py
# in this folder. A class
# in it is used to reduce
# dataframe size
# (Code modified by me to
# exclude 'category' dtype)
import reducing
# 1.2 Misc
import warnings
import os
warnings.simplefilter(action='ignore', category=FutureWarning)
# In[40]:
# 1.3
pd.set_option('display.max_colwidth', -1)
###########################################################################################################
## IMPORTS
###########################################################################################################
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
from keras.layers.advanced_activations import LeakyReLU, ELU, ReLU
from keras.models import Sequential, Model, model_from_json
from keras.layers import Activation, Convolution2D, Conv2D, LocallyConnected2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dense, Dropout, Input, concatenate, add, Add, ZeroPadding2D, GlobalMaxPooling2D, DepthwiseConv2D
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam
from keras.regularizers import l2
#from keras.activations import linear, elu, tanh, relu
from keras import metrics, losses, initializers, backend
from keras.utils import multi_gpu_model
from keras.initializers import glorot_uniform, Constant, lecun_uniform
from keras import backend as K
os.environ["PATH"] += os.pathsep + "C:/ProgramData/Anaconda3/GraphViz/bin/"
os.environ["PATH"] += os.pathsep + "C:/Anaconda/Graphviz2.38/bin/"
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import train_test_split
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
tf.get_logger().setLevel('ERROR')
physical_devices = tf.config.list_physical_devices('GPU')
for pd_dev in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[pd_dev], True)
##from tensorflow.compat.v1.keras.backend import set_session
##config = tf.compat.v1.ConfigProto()
##config.gpu_options.per_process_gpu_memory_fraction = 0.9
##config.gpu_options.allow_growth = True
##config.log_device_placement = True
##set_session(config)
#config = tf.compat.v1.ConfigProto()
#config.gpu_options.allow_growth = True
#config.log_device_placement = True
#sess = tf.compat.v1.InteractiveSession(config = config)
#set_session(sess)
#backend.set_session(sess)
###########################################################################################################
## PLOTTING PALETTE
###########################################################################################################
# Create a dict object containing U.C. Berkeley official school colors for plot palette
# reference : https://alumni.berkeley.edu/brand/color-palette
berkeley_palette = {'berkeley_blue' : '#003262',
'california_gold' : '#FDB515',
'metallic_gold' : '#BC9B6A',
'founders_rock' : '#2D637F',
'medalist' : '#E09E19',
'bay_fog' : '#C2B9A7',
'lawrence' : '#00B0DA',
'sather_gate' : '#B9D3B6',
'pacific' : '#53626F',
'soybean' : '#9DAD33',
'california_purple' : '#5C3160',
'south_hall' : '#6C3302'}
###########################################################################################################
## CLASS CONTAINING MODEL ZOO
###########################################################################################################
class Models(object):
def __init__(self, model_path, **kwargs):
super(Models, self).__init__(** kwargs)
# validate that the constructor parameters were provided by caller
if (not model_path):
raise RuntimeError('path to model files must be provided on initialization.')
        # ensure the path is a string and leading/trailing whitespace is removed
model_path = str(model_path).replace('\\', '/').strip()
if (not model_path.endswith('/')): model_path = ''.join((model_path, '/'))
# validate the existence of the data path
if (not os.path.isdir(model_path)):
raise RuntimeError("Models path specified'%s' is invalid." % model_path)
self.__models_path = model_path
self.__GPU_count = len(tf.config.list_physical_devices('GPU'))
self.__MIN_early_stopping = 10
#------------------------------------------------
# Private Methods
#------------------------------------------------
# plotting method for keras history arrays
def __plot_keras_history(self, history, metric, model_name, feature_name, file_name, verbose = False):
# Plot the performance of the model training
fig = plt.figure(figsize=(15,8),dpi=80)
ax = fig.add_subplot(121)
ax.plot(history.history[metric][1:], color = berkeley_palette['founders_rock'], label = 'Train',
marker = 'o', markersize = 4, alpha = 0.9)
ax.plot(history.history["".join(["val_",metric])][1:], color = berkeley_palette['medalist'], label = 'Validation',
marker = 'o', markersize = 4, alpha = 0.9)
ax.set_title(" ".join(['Model Performance',"(" + model_name + ")"]) + "\n" + feature_name,
color = berkeley_palette['berkeley_blue'], fontsize = 15, fontweight = 'bold')
ax.spines["top"].set_alpha(.0)
ax.spines["bottom"].set_alpha(.3)
ax.spines["right"].set_alpha(.0)
ax.spines["left"].set_alpha(.3)
ax.set_xlabel("Epoch", fontsize = 12, horizontalalignment='right', x = 1.0, color = berkeley_palette['berkeley_blue'])
ax.set_ylabel(metric, fontsize = 12, horizontalalignment='right', y = 1.0, color = berkeley_palette['berkeley_blue'])
plt.legend(loc = 'upper right')
ax = fig.add_subplot(122)
ax.plot(history.history['loss'][1:], color = berkeley_palette['founders_rock'], label = 'Train',
marker = 'o', markersize = 4, alpha = 0.9)
ax.plot(history.history["".join(["val_loss"])][1:], color = berkeley_palette['medalist'], label = 'Validation',
marker = 'o', markersize = 4, alpha = 0.9)
ax.set_title(" ".join(['Model Performance',"(" + model_name + ")"]) + "\n" + feature_name,
color = berkeley_palette['berkeley_blue'], fontsize = 15, fontweight = 'bold')
ax.spines["top"].set_alpha(.0)
ax.spines["bottom"].set_alpha(.3)
ax.spines["right"].set_alpha(.0)
ax.spines["left"].set_alpha(.3)
ax.set_xlabel("Epoch", fontsize = 12, horizontalalignment='right', x = 1.0, color = berkeley_palette['berkeley_blue'])
ax.set_ylabel("Loss", fontsize = 12, horizontalalignment='right', y = 1.0, color = berkeley_palette['berkeley_blue'])
plt.legend(loc = 'upper right')
plt.tight_layout()
plt.savefig(file_name, dpi=300)
if verbose: print("Training plot file saved to '%s'." % file_name)
plt.close()
# load Keras model files from json / h5
def __load_keras_model(self, model_name, model_file, model_json, verbose = False):
"""Loads a Keras model from disk"""
if not os.path.isfile(model_file):
raise RuntimeError("Model file '%s' does not exist; exiting inferencing." % model_file)
if not os.path.isfile(model_json):
raise RuntimeError("Model file '%s' does not exist; exiting inferencing." % model_json)
# load model file
if verbose: print("Retrieving model: %s..." % model_name)
json_file = open(model_json, "r")
model_json_data = json_file.read()
json_file.close()
model = model_from_json(model_json_data)
model.load_weights(model_file)
return model
# Performs standard scaling on a 4D image
def __4d_Scaler(self, arr, ss, fit = False, verbose = False):
"""Performs standard scaling of the 4D array with the 'ss' model provided by caller"""
#Unwinds a (instances, rows, columns, layers) array to 2D for standard scaling
num_instances, num_rows, num_columns, num_layers = arr.shape
arr_copy = np.reshape(arr, (-1, num_columns))
# fit the standard scaler
if fit:
if verbose: print("Fitting SCALER and transforming...")
arr_copy = ss.fit_transform(arr_copy)
else:
if verbose: print("Transforming SCALER only...")
arr_copy = ss.transform(arr_copy)
arr = np.reshape(arr_copy, (num_instances, num_rows, num_columns, num_layers))
return arr
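    # A standalone sketch of the same unwind/rewind trick used by __4d_Scaler above
    # (illustrative only, never called; `ss` would be an sklearn StandardScaler, already imported):
    #
    #   arr = np.random.rand(8, 96, 96, 1)             # (instances, rows, columns, layers)
    #   flat = np.reshape(arr, (-1, arr.shape[2]))     # unwind so each image column becomes a feature
    #   flat = StandardScaler().fit_transform(flat)
    #   arr = np.reshape(flat, (8, 96, 96, 1))         # rewind to the original 4-D shape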
# resnet identity block builder
def __identity_block(self, model, kernel_size, filters, stage, block):
"""modularized identity block for resnet"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(model)
x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)
x = add([x, model])
x = Activation('relu')(x)
return x
# resnet conv block builder
def __conv_block(self, model, kernel_size, filters, stage, block, strides=(2, 2)):
"""conv block builder for resnet"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(model)
x = BatchNormalization(axis=3, name=bn_name_base + '2a')(x)
x =Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=3, name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(model)
shortcut = BatchNormalization(
axis=3, name=bn_name_base + '1')(shortcut)
x = add([x, shortcut])
x = Activation('relu')(x)
return x
# create a layerable inception module
def __inception_module(self, model, filters_1x1, filters_3x3_reduce, filters_3x3,
filters_5x5_reduce, filters_5x5, filters_pool_proj, kernel_init, bias_init, name = None):
"""modularized inception block for layering"""
# Connection Layer 1 (1x1)
conv_1x1 = Convolution2D(filters_1x1, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
# Connection Layer 2 (3x3)
conv_3x3 = Convolution2D(filters_3x3_reduce, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
conv_3x3 = Convolution2D(filters_3x3, (3, 3), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (conv_3x3)
# Connection Layer 3 (5x5)
conv_5x5 = Convolution2D(filters_5x5_reduce, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (model)
conv_5x5 = Convolution2D(filters_5x5, (5, 5), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (conv_5x5)
# Connection Layer 4 (pool)
pool_proj = MaxPooling2D((3, 3), strides = (1, 1), padding = 'same') (model)
pool_proj = Convolution2D(filters_pool_proj, (1, 1), padding = 'same', activation = 'relu',
kernel_initializer = kernel_init, bias_initializer = bias_init) (pool_proj)
# Concatenation layer
output = concatenate(inputs = [conv_1x1, conv_3x3, conv_5x5, pool_proj], axis = 3, name = name)
return output
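    # A sketch of how the module above is meant to be stacked (illustrative only; the filter
    # counts, initializers and layer names below are placeholders, not values used elsewhere here):
    #
    #   inp = Input((96, 96, 1))
    #   x = self.__inception_module(inp, filters_1x1 = 64, filters_3x3_reduce = 96, filters_3x3 = 128,
    #                               filters_5x5_reduce = 16, filters_5x5 = 32, filters_pool_proj = 32,
    #                               kernel_init = 'glorot_uniform', bias_init = Constant(0.2),
    #                               name = 'inception_1')
    #   x = GlobalAveragePooling2D() (x)
    #   out = Dense(2) (x)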
# return an InceptionV3 output tensor after applying Conv2D and BatchNormalization
def __conv2d_bn(self, x, filters, num_row, num_col, padding = 'same', strides = (1, 1), name = None):
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 3
x = Convolution2D(filters, (num_row, num_col), strides = strides,
padding = padding, use_bias = False, name = conv_name) (x)
x = BatchNormalization(axis = bn_axis, scale = False, name = bn_name) (x)
x = ReLU(name = name) (x)
return x
# a residual block for resnext
def __resnext_block(self, x, filters, kernel_size = 3, stride = 1, groups = 32, conv_shortcut = True, name = None):
if conv_shortcut is True:
shortcut = Conv2D((64 // groups) * filters, 1, strides = stride, use_bias = False, name = name + '_0_conv') (x)
shortcut = BatchNormalization(axis = 3, epsilon=1.001e-5, name = name + '_0_bn') (shortcut)
else:
shortcut = x
x = Conv2D(filters, 1, use_bias = False, name = name + '_1_conv') (x)
x = BatchNormalization(axis = 3, epsilon = 1.001e-5, name = name + '_1_bn') (x)
x = Activation('relu', name = name + '_1_relu') (x)
c = filters // groups
x = ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
x = DepthwiseConv2D(kernel_size, strides = stride, depth_multiplier = c, use_bias = False, name = name + '_2_conv') (x)
kernel = np.zeros((1, 1, filters * c, filters), dtype = np.float32)
for i in range(filters):
start = (i // c) * c * c + i % c
end = start + c * c
kernel[:, :, start:end:c, i] = 1.
x = Conv2D(filters, 1, use_bias = False, trainable = False, kernel_initializer = {'class_name': 'Constant','config': {'value': kernel}}, name = name + '_2_gconv') (x)
x = BatchNormalization(axis=3, epsilon = 1.001e-5, name = name + '_2_bn') (x)
x = Activation('relu', name=name + '_2_relu') (x)
x = Conv2D((64 // groups) * filters, 1, use_bias = False, name = name + '_3_conv') (x)
x = BatchNormalization(axis = 3, epsilon=1.001e-5, name = name + '_3_bn') (x)
x = Add(name = name + '_add') ([shortcut, x])
x = Activation('relu', name = name + '_out') (x)
return x
# a set of stacked residual blocks for ResNeXt
def __resnext_stack(self, x, filters, blocks, stride1 = 2, groups = 32, name = None, dropout = None):
x = self.__resnext_block(x, filters, stride = stride1, groups = groups, name = name + '_block1')
for i in range(2, blocks + 1):
x = self.__resnext_block(x, filters, groups = groups, conv_shortcut = False,
name = name + '_block' + str(i))
if not dropout is None:
x = Dropout(dropout) (x)
return x
def __bn_relu(self, x, bn_name = None, relu_name = None):
norm = BatchNormalization(axis = 3, name = bn_name) (x)
return Activation("relu", name = relu_name) (norm)
def __bn_relu_conv(self, **conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
activation = self.__bn_relu(x, bn_name = bn_name, relu_name = relu_name)
return Conv2D(filters = filters, kernel_size = kernel_size,
strides = strides, padding = padding,
dilation_rate = dilation_rate,
kernel_initializer = kernel_initializer,
kernel_regularizer = kernel_regularizer,
name = conv_name) (activation)
return f
def __conv_bn_relu(self, **conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
conv_name = conv_params.setdefault("conv_name", None)
bn_name = conv_params.setdefault("bn_name", None)
relu_name = conv_params.setdefault("relu_name", None)
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(x):
x = Conv2D(filters = filters, kernel_size = kernel_size,
strides = strides, padding = padding,
dilation_rate = dilation_rate,
kernel_initializer = kernel_initializer,
kernel_regularizer = kernel_regularizer,
name = conv_name) (x)
return self.__bn_relu(x, bn_name = bn_name, relu_name = relu_name)
return f
def __block_name_base(self, stage, block):
if block < 27:
block = '%c' % (block + 97) # 97 is the ascii number for lowercase 'a'
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
return conv_name_base, bn_name_base
def __shortcut(self, input_feature, residual, conv_name_base = None, bn_name_base = None):
input_shape = K.int_shape(input_feature)
residual_shape = K.int_shape(residual)
stride_width = int(round(input_shape[1] / residual_shape[1]))
stride_height = int(round(input_shape[2] / residual_shape[2]))
equal_channels = input_shape[3] == residual_shape[3]
shortcut = input_feature
# 1 X 1 conv if shape is different. Else identity.
if stride_width > 1 or stride_height > 1 or not equal_channels:
print('reshaping via a convolution...')
if conv_name_base is not None:
conv_name_base = conv_name_base + '1'
shortcut = Conv2D(filters=residual_shape[3],
kernel_size=(1, 1),
strides=(stride_width, stride_height),
padding="valid",
kernel_initializer="he_normal",
kernel_regularizer=l2(0.0001),
name=conv_name_base)(input_feature)
if bn_name_base is not None:
bn_name_base = bn_name_base + '1'
shortcut = BatchNormalization(axis=3,
name=bn_name_base)(shortcut)
return add([shortcut, residual])
def __basic_block(self, filters, stage, block, transition_strides = (1, 1),
dilation_rate = (1, 1), is_first_block_of_first_layer = False, dropout = None,
residual_unit = None):
def f(input_features):
conv_name_base, bn_name_base = self.__block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters = filters, kernel_size = (3, 3),
strides = transition_strides, dilation_rate = dilation_rate,
padding = "same", kernel_initializer = "he_normal", kernel_regularizer = l2(1e-4),
name = conv_name_base + '2a') (input_features)
else:
x = residual_unit(filters = filters, kernel_size = (3, 3),
strides = transition_strides,
dilation_rate = dilation_rate,
conv_name_base = conv_name_base + '2a',
bn_name_base = bn_name_base + '2a') (input_features)
if dropout is not None:
x = Dropout(dropout) (x)
x = residual_unit(filters = filters, kernel_size = (3, 3),
conv_name_base = conv_name_base + '2b',
bn_name_base = bn_name_base + '2b') (x)
return self.__shortcut(input_features, x)
return f
def __bottleneck(self, filters, stage, block, transition_strides = (1, 1),
dilation_rate = (1, 1), is_first_block_of_first_layer = False, dropout = None,
residual_unit = None):
"""Bottleneck architecture for > 34 layer resnet.
Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
Returns:
A final conv layer of filters * 4
"""
def f(input_feature):
conv_name_base, bn_name_base = self.__block_name_base(stage, block)
if is_first_block_of_first_layer:
# don't repeat bn->relu since we just did bn->relu->maxpool
x = Conv2D(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=l2(1e-4),
name=conv_name_base + '2a')(input_feature)
else:
x = residual_unit(filters=filters, kernel_size=(1, 1),
strides=transition_strides,
dilation_rate=dilation_rate,
conv_name_base=conv_name_base + '2a',
bn_name_base=bn_name_base + '2a')(input_feature)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters, kernel_size=(3, 3),
conv_name_base=conv_name_base + '2b',
bn_name_base=bn_name_base + '2b')(x)
if dropout is not None:
x = Dropout(dropout)(x)
x = residual_unit(filters=filters * 4, kernel_size=(1, 1),
conv_name_base=conv_name_base + '2c',
bn_name_base=bn_name_base + '2c')(x)
return self.__shortcut(input_feature, x)
return f
# builds a residual block for resnet with repeating bottleneck blocks
def __residual_block(self, block_function, filters, blocks, stage, transition_strides = None, transition_dilation_rates = None,
dilation_rates = None, is_first_layer = False, dropout = None, residual_unit = None):
if transition_dilation_rates is None:
transition_dilation_rates = [(1, 1)] * blocks
if transition_strides is None:
transition_strides = [(1, 1)] * blocks
if dilation_rates is None:
dilation_rates = [1] * blocks
def f(x):
for i in range(blocks):
is_first_block = is_first_layer and i == 0
x = block_function(filters=filters, stage=stage, block=i,
transition_strides=transition_strides[i],
dilation_rate=dilation_rates[i],
is_first_block_of_first_layer=is_first_block,
dropout=dropout,
residual_unit=residual_unit)(x)
return x
return f
######################################################
######################################################
######################################################
### KERAS MODEL ZOO
######################################################
######################################################
######################################################
#------------------------------------------------
# NaimishNet Model
# ref: https://arxiv.org/abs/1710.00977
#------------------------------------------------
def get_keras_naimishnet(self, X, Y, batch_size, epoch_count, X_val = None, Y_val = None, val_split = 0.1, shuffle = True,
feature_name = "unknown", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - NaimishNet"
__MODEL_FNAME_PREFIX = "KERAS_NAIMISHNET/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, "_model_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, "_", feature_name, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
#ke = initializers.lecun_uniform(seed = 42)
ke = 'glorot_uniform'
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
l1 = Input((96, 96, 1))
l2 = Convolution2D(32, (4, 4), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l1)
#l3 = ELU() (l2)
l3 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l2)
l4 = Dropout(rate = 0.1) (l3)
l5 = Convolution2D(64, (3, 3), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l4)
#l7 = ELU() (l6)
l6 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l5)
l7 = Dropout(rate = 0.2) (l6)
l8 = Convolution2D(128, (2, 2), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l7)
#l11 = ELU() (l10)
l9 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l8)
l10 = Dropout(rate = 0.3) (l9)
l11 = Convolution2D(256, (1, 1), kernel_initializer = ke, padding = 'valid', activation = 'elu') (l10)
#l15 = ELU() (l14)
l12 = MaxPooling2D(pool_size=(2,2), strides = (2,2)) (l11)
l13 = Dropout(rate = 0.4) (l12)
l14 = Flatten() (l13)
l15 = Dense(1000, activation = 'elu') (l14)
#l20 = ELU() (l19)
l16 = Dropout(rate = 0.5) (l15)
#l22 = Dense(1000) (l21)
#l23 = linear(l22)
l17 = Dense(1000, activation = 'linear') (l16)
l18 = Dropout(rate = 0.6) (l17)
l19 = Dense(2) (l18)
model = Model(inputs = [l1], outputs = [l19])
model.compile(optimizer = act, loss = lss, metrics = mtrc)
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', model_name = __MODEL_NAME,
feature_name = feature_name, file_name = __history_plot_file, verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_naimishnet(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - NaimishNet"
__MODEL_FNAME_PREFIX = "KERAS_NAIMISHNET/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "NaimishNet_", feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
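# Usage sketch (illustrative assumption, names not taken from the original source):
# given a trainer instance `t`, grayscale images X of shape (n, 96, 96, 1) and a
# per-feature target Y of shape (n, 2), training and inference would look like:
#
#   model, hist_params, hist = t.get_keras_naimishnet(X, Y, batch_size=32,
#                                                     epoch_count=100,
#                                                     feature_name="left_eye_center")
#   Y_pred = t.predict_keras_naimishnet(X_test, feature_name="left_eye_center")
#
# NaimishNet regresses one (x, y) pair per facial keypoint, so `feature_name`
# selects which per-feature model file is created or loaded.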
#------------------------------------------------
# Kaggle1 Model
# Inspired by: https://www.kaggle.com/balraj98/data-augmentation-for-facial-keypoint-detection
#------------------------------------------------
def get_keras_kaggle1(self, X, Y, batch_size, epoch_count, val_split = 0.05, X_val = None, Y_val = None, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle1"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE1/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-08)
act = 'adam'
#lss = losses.mean_squared_error
lss = 'mean_squared_error'
#mtrc = [metrics.RootMeanSquaredError()]
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
# Input dimensions: (None, 96, 96, 1)
model.add(Convolution2D(32, (3,3), padding='same', use_bias=False, input_shape=(96,96,1)))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 96, 96, 32)
model.add(Convolution2D(32, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.2))
# Input dimensions: (None, 48, 48, 32)
model.add(Convolution2D(64, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 48, 48, 64)
model.add(Convolution2D(64, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.25))
# Input dimensions: (None, 24, 24, 64)
model.add(Convolution2D(96, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 24, 24, 96)
model.add(Convolution2D(96, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.15))
# Input dimensions: (None, 12, 12, 96)
model.add(Convolution2D(128, (3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 12, 12, 128)
model.add(Convolution2D(128, (3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.3))
# Input dimensions: (None, 6, 6, 128)
model.add(Convolution2D(256, (3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 6, 6, 256)
model.add(Convolution2D(256, (3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
# CDB: 3/5 DROPOUT ADDED
model.add(Dropout(0.2))
# Input dimensions: (None, 3, 3, 256)
model.add(Convolution2D(512, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 512)
model.add(Convolution2D(512, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# TEST added 4/8
model.add(Dropout(0.3))
model.add(Convolution2D(1024, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 512)
model.add(Convolution2D(1024, (3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
# Input dimensions: (None, 3, 3, 512)
model.add(Flatten())
model.add(Dense(1024,activation='relu'))
# CDB DROPOUT INCREASED FROM 0.1 to 0.2
model.add(Dropout(0.15))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', #metric = 'root_mean_squared_error',
model_name = __MODEL_NAME, feature_name = feature_name, file_name = __history_plot_file,
verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
if verbose: print("Model JSON, history, and parameters file saved.")
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_kaggle1(self, X, feature_name = "unknown", full = True, verbose = False):
__MODEL_NAME = "Keras - Kaggle1"
__MODEL_FNAME_PREFIX = "KERAS_KAGGLE1/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
##__scaler_file = "".join([nested_dir, feature_name, "_scaler.pkl"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):## or (not os.path.isfile(__scaler_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" % ##'%s'\n" %
(__model_file_name, __model_json_file))##, __scaler_file))
# Load the training scaler for this model
##if verbose: print("Loading SCALER for '%s' and zero-centering X." % feature_name)
##scaler = pickle.load(open(__scaler_file, "rb"))
##X = self.__4d_Scaler(arr = X, ss = scaler, fit = False, verbose = verbose)
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# LeNet5 Model
# Inspired by: LeCun's LeNet-5 for MNIST - Modified
#-------------------------------------------------------------
def get_keras_lenet5(self, X, Y, batch_size, epoch_count, X_val = None, Y_val = None, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, full = True, verbose = False):
__MODEL_NAME = "Keras - LeNet5"
__MODEL_FNAME_PREFIX = "KERAS_LENET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_model_plot.png"])
__history_params_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, "_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % feature_name)
#if (X_val is None) or (Y_val is None):
# if verbose: print("No validation set specified; creating a split based on %.2f val_split parameter." % val_split)
# X, Y, X_val, Y_val = train_test_split(X, Y, test_size = val_split, random_state = 42)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp = ModelCheckpoint(filepath = __model_file_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_mae')
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
model = Sequential()
model.add(Convolution2D(filters = 6, kernel_size = (3, 3), input_shape = (96, 96, 1)))
model.add(ReLU())
# CDB: 3/5 added Batch Normalization
#model.add(BatchNormalization())
model.add(AveragePooling2D())
#model.add(Dropout(0.2))
model.add(Convolution2D(filters = 16, kernel_size = (3, 3)))
model.add(ReLU())
# CDB: 3/5 added Batch Normalization
#model.add(BatchNormalization())
model.add(AveragePooling2D())
#model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(ReLU())
#model.add(Dropout(0.1))
model.add(Dense(256))
model.add(ReLU())
#model.add(Dropout(0.2))
if full:
model.add(Dense(30))
else:
model.add(Dense(8))
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, Y, validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
else:
history = parallel_model.fit(X, Y, validation_data = (X_val, Y_val), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp], verbose = verbose)
# print and/or save a performance plot
self.__plot_keras_history(history = history, metric = 'mse', model_name = __MODEL_NAME,
feature_name = feature_name, file_name = __history_plot_file, verbose = verbose)
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
hist.to_csv(__history_performance_file)
# save a plot of the model architecture
plot_model(parallel_model, to_file = __model_architecture_plot_file, rankdir = 'TB',
show_shapes = True, show_layer_names = True, expand_nested = True, dpi=300)
if verbose: print("Model JSON, history, and parameters file saved.")
else:
if verbose: print("Loading history and params files for '%s' MODEL..." % feature_name)
hist_params = pd.read_csv(__history_params_file)
hist = pd.read_csv(__history_performance_file)
if verbose: print("Loading pickle file for '%s' MODEL from file '%s'" % (feature_name, __model_file_name))
parallel_model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
return parallel_model, hist_params, hist
# inferencing
def predict_keras_lenet5(self, X, feature_name = "ALL_FEATURES", full = True, verbose = False):
__MODEL_NAME = "Keras - LeNet5"
__MODEL_FNAME_PREFIX = "KERAS_LENET5/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
raise RuntimeError("Model path '%s' does not exist; exiting inferencing." % nested_dir)
__model_file_name = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, feature_name, __MODEL_SUFFIX, ".json"])
if (not os.path.isfile(__model_file_name)) or (not os.path.isfile(__model_json_file)):
raise RuntimeError("One or some of the following files are missing; prediction cancelled:\n\n'%s'\n'%s'\n" %
(__model_file_name, __model_json_file))
# load the Keras model for the specified feature
model = self.__load_keras_model(__MODEL_NAME, __model_file_name, __model_json_file, verbose = verbose)
# predict
if verbose: print("Predicting %d (x,y) coordinates for '%s'..." % (len(X), feature_name))
Y = model.predict(X, verbose = verbose)
if verbose: print("Predictions completed!")
return Y
#-------------------------------------------------------------
# Inception V1
# Inspired by : https://arxiv.org/abs/1409.4842
#-------------------------------------------------------------
def get_keras_inception(self, X, Y, batch_size, epoch_count, val_split = 0.1, shuffle = True,
feature_name = "ALL_FEATURES", recalculate_pickle = True, X_val = None, Y_val = None, full = True, verbose = False):
__MODEL_NAME = "Keras - Inception"
__MODEL_FNAME_PREFIX = "KERAS_INCEPTION/"
if full:
__MODEL_SUFFIX = "_30"
else:
__MODEL_SUFFIX = "_8"
nested_dir = "".join([self.__models_path,__MODEL_FNAME_PREFIX])
if not os.path.exists(nested_dir):
os.makedirs(nested_dir)
__model_file_MAIN_name = "".join([nested_dir, "inception_MAIN_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX1_name = "".join([nested_dir, "inception_AUX1_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_file_AUX2_name = "".join([nested_dir, "inception_AUX2_", feature_name, __MODEL_SUFFIX, ".h5"])
__model_json_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, ".json"])
__model_architecture_plot_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_plot.png"])
__history_params_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_params.csv"])
__history_performance_file = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_history.csv"])
__history_plot_file_main = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_main_output_mse_plot.png"])
__history_plot_file_auxilliary1 = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_auxilliary_output_1_mse_plot.png"])
__history_plot_file_auxilliary2 = "".join([nested_dir, "inception_", feature_name, __MODEL_SUFFIX, "_auxilliary_output_2_mse_plot.png"])
if verbose: print("Retrieving model: %s..." % "".join([__MODEL_NAME, __MODEL_SUFFIX]))
# Create or load the model
if (not os.path.isfile(__model_file_MAIN_name)) or (not os.path.isfile(__model_file_AUX1_name)) or (not os.path.isfile(__model_file_AUX2_name)) or (not os.path.isfile(__model_json_file)) or recalculate_pickle:
if verbose: print("Pickle file for '%s' MODEL not found or skipped by caller." % __MODEL_NAME)
act = Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
lss = 'mean_squared_error'
mtrc = ['mae','mse']
stop_at = np.max([int(0.1 * epoch_count), self.__MIN_early_stopping])
es = EarlyStopping(patience = stop_at, verbose = verbose)
cp_main = ModelCheckpoint(filepath = __model_file_MAIN_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_main_output_mae')
cp_aux1 = ModelCheckpoint(filepath = __model_file_AUX1_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_auxilliary_output_1_mae')
cp_aux2 = ModelCheckpoint(filepath = __model_file_AUX2_name, verbose = verbose, save_best_only = True,
mode = 'min', monitor = 'val_auxilliary_output_2_mae')
kernel_init = glorot_uniform()
bias_init = Constant(value = 0.2)
if self.__GPU_count > 1: dev = "/cpu:0"
else: dev = "/gpu:0"
with tf.device(dev):
# Input image shape (H, W, C)
input_img = Input(shape=(96, 96, 1))
# Top Layer (Begin MODEL)
model = Convolution2D(filters = 64, kernel_size = (7, 7), padding = 'same', strides = (2, 2),
activation = 'relu', name = 'conv_1_7x7/2', kernel_initializer = kernel_init,
bias_initializer = bias_init) (input_img)
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_1_3x3/2') (model)
model = Convolution2D(64, (1, 1), padding = 'same', strides = (1, 1), activation = 'relu', name = 'conv_2a_3x3/1') (model)
model = Convolution2D(192, (3, 3), padding = 'same', strides = (1, 1), activation = 'relu', name = 'conv_2b_3x3/1') (model)
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_2_3x3/2') (model)
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 64,
filters_3x3_reduce = 96,
filters_3x3 = 128,
filters_5x5_reduce = 16,
filters_5x5 = 32,
filters_pool_proj = 32,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_3a')
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 128,
filters_3x3_reduce = 128,
filters_3x3 = 192,
filters_5x5_reduce = 32,
filters_5x5 = 96,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_3b')
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name= 'max_pool_3_3x3/2') (model)
# Inception Module
model = self.__inception_module(model,
filters_1x1 = 192,
filters_3x3_reduce = 96,
filters_3x3 = 208,
filters_5x5_reduce = 16,
filters_5x5 = 48,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name = 'inception_4a')
# CDB 3/5 DROPOUT ADDED
model = Dropout(0.2) (model)
# Begin MODEL1 (auxiliary output)
model1 = AveragePooling2D((5, 5), padding = 'same', strides = 3, name= 'avg_pool_4_5x5/2') (model)
model1 = Convolution2D(128, (1, 1), padding = 'same', activation = 'relu') (model1)
model1 = Flatten() (model1)
model1 = Dense(1024, activation = 'relu') (model1)
model1 = Dropout(0.3) (model1)
if full:
model1 = Dense(30, name = 'auxilliary_output_1') (model1)
else:
model1 = Dense(8, name = 'auxilliary_output_1') (model1)
# Resume MODEL w/ Inception
model = self.__inception_module(model,
filters_1x1 = 160,
filters_3x3_reduce = 112,
filters_3x3 = 224,
filters_5x5_reduce = 24,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4b')
model = self.__inception_module(model,
filters_1x1 = 128,
filters_3x3_reduce = 128,
filters_3x3 = 256,
filters_5x5_reduce = 24,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4c')
model = self.__inception_module(model,
filters_1x1 = 112,
filters_3x3_reduce = 144,
filters_3x3 = 288,
filters_5x5_reduce = 32,
filters_5x5 = 64,
filters_pool_proj = 64,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4d')
# CDB : 3/5 DROPOUT ADDED
model = Dropout(0.2) (model)
# Begin MODEL2 (auxiliary output)
model2 = AveragePooling2D((5, 5), strides = 3) (model)
model2 = Convolution2D(128, (1, 1), padding = 'same', activation = 'relu') (model2)
model2 = Flatten() (model2)
model2 = Dense(1024, activation = 'relu') (model2)
model2 = Dropout(0.3) (model2)
if full:
model2 = Dense(30, name = 'auxilliary_output_2') (model2)
else:
model2 = Dense(8, name = 'auxilliary_output_2') (model2)
# Resume MODEL w/ Inception
model = self.__inception_module(model,
filters_1x1 = 256,
filters_3x3_reduce = 160,
filters_3x3 = 320,
filters_5x5_reduce = 32,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_4e')
model = MaxPooling2D((3, 3), padding = 'same', strides = (2, 2), name = 'max_pool_4_3x3/2') (model)
model = self.__inception_module(model,
filters_1x1 = 256,
filters_3x3_reduce = 160,
filters_3x3 = 320,
filters_5x5_reduce = 32,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_5a')
model = self.__inception_module(model,
filters_1x1 = 384,
filters_3x3_reduce = 192,
filters_3x3 = 384,
filters_5x5_reduce = 48,
filters_5x5 = 128,
filters_pool_proj = 128,
kernel_init = kernel_init,
bias_init = bias_init,
name='inception_5b')
model = GlobalAveragePooling2D(name = 'avg_pool_5_3x3/1') (model)
model = Dropout(0.3) (model)
# Output Layer (Main)
if full:
model = Dense(30, name = 'main_output') (model)
else:
model = Dense(8, name = 'main_output') (model)
model = Model(input_img, [model, model1, model2], name = 'Inception')
if verbose: print(model.summary())
# Compile the model
if self.__GPU_count > 1:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
parallel_model = multi_gpu_model(model, gpus = self.__GPU_count)
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
else:
parallel_model = model
parallel_model.compile(optimizer = act, loss = lss, metrics = mtrc)
if (X_val is None) or (Y_val is None):
history = parallel_model.fit(X, [Y, Y, Y], validation_split = val_split, batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main, cp_aux1, cp_aux2], verbose = verbose)
else:
history = parallel_model.fit(X, [Y, Y, Y], validation_data = (X_val, [Y_val, Y_val, Y_val]), batch_size = batch_size * self.__GPU_count,
epochs = epoch_count, shuffle = shuffle, callbacks = [es, cp_main, cp_aux1, cp_aux2], verbose = verbose)
# print and/or save a performance plot
for m, f in zip(['main_output_mse', 'auxilliary_output_1_mse', 'auxilliary_output_2_mse'],
[__history_plot_file_main, __history_plot_file_auxilliary1, __history_plot_file_auxilliary2]):
try:
self.__plot_keras_history(history = history, metric = m, model_name = __MODEL_NAME,
feature_name = feature_name, file_name = f, verbose = False)
except Exception:
print("error during history plot generation; skipped.")
# save the model, parameters, and performance history
model_json = parallel_model.to_json()
with open(__model_json_file, "w") as json_file:
json_file.write(model_json)
hist_params = pd.DataFrame(history.params)
hist_params.to_csv(__history_params_file)
hist = pd.DataFrame(history.history)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
Earnings expectation
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'
self.factor_type1 = '盈利预测'
self.factor_type2 = '盈利预测'
self.description = '个股盈利预测因子'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
:name: Consensus forecast net profit (FY1)
:desc: Consensus net profit forecast for the first forecast year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
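# Usage sketch (illustrative assumption): each factor method slices the raw
# consensus table at one publish_date, renames the source column to the factor
# name and merges it onto the running factor frame keyed by 'security_code':
#
#   factor_earning_expect = FactorEarningExpectation.NPFY1(tp_earning,
#                                                          factor_earning_expect,
#                                                          trade_date)
#
# The remaining FY1/FY2 factors below follow the same pattern with different
# dependency columns.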
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
:name: Consensus forecast net profit (FY2)
:desc: Consensus net profit forecast for the second forecast year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
:name: Consensus forecast EPS (FY1)
:desc: Mean consensus EPS forecast for the first forecast year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
:name: Consensus forecast EPS (FY2)
:desc: Mean consensus EPS forecast for the second forecast year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
:name: Consensus forecast operating revenue (FY1)
:desc: Mean consensus operating revenue forecast for the first forecast year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
:name: Consensus forecast operating revenue (FY2)
:desc: Mean consensus operating revenue forecast for the second forecast year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
:name: Consensus forecast price-to-earnings ratio (PE) (FY1)
:desc: Mean consensus P/E forecast for the first forecast year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
:name: Consensus forecast price-to-earnings ratio (PE) (FY2)
:desc: Mean consensus P/E forecast for the second forecast year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
:name: Consensus forecast price-to-book ratio (PB) (FY1)
:desc: Mean consensus P/B forecast for the first forecast year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
:name: Consensus forecast price-to-book ratio (PB) (FY2)
:desc: Mean consensus P/B forecast for the second forecast year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
:name: Price/earnings-to-growth (PEG) ratio (FY1)
:desc: PEG ratio for the first forecast year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
:name: Price/earnings-to-growth (PEG) ratio (FY2)
:desc: PEG ratio for the second forecast year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, column, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, column]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, column]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[column + '_x'] - earning_expect[column + '_y']) / \
earning_expect[column + '_y']
earning_expect.drop(columns=[column + '_x', column + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, column, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, column]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, column]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[column + '_x'] - earning_expect[column + '_y'])
earning_expect.drop(columns=[column + '_x', column + '_y'], inplace=True)
return earning_expect
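# Worked example (illustrative): if a security's net_profit_fy1 is 120 at the
# current publish_date and 100 at the earlier one, _change_rate yields
# (120 - 100) / 100 = 0.2 and _change_value yields 120 - 100 = 20. The '_x'/'_y'
# suffixes above come from pandas suffixing the overlapping column when the
# current and prior snapshots are merged.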
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1) change rate - one week
:desc: One-week change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1) change rate - one month
:desc: One-month change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1) change rate - three months
:desc: Three-month change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast net profit (FY1) change rate - six months
:desc: Six-month change rate of the consensus net profit forecast for the first forecast year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1) change - one week
:desc: One-week change of the consensus EPS forecast for the first forecast year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Consensus forecast EPS (FY1) change - one month
:desc: One-month change of the consensus EPS forecast for the first forecast year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, sparse=sparse)
expected = DataFrame(
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
dtype=np.uint8,
)
if sparse:
expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": SparseArray([0, 1, 0], dtype="uint8"),
"B_b": SparseArray([1, 1, 0], dtype="uint8"),
"B_c": SparseArray([0, 0, 1], dtype="uint8"),
}
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
}
)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ["from_A", "from_B"]
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected = expected[["C"] + cols]
typ = SparseArray if sparse else Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix="bad", sparse=sparse)
bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"]
expected = DataFrame(
[[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]],
columns=["C"] + bad_columns,
dtype=np.uint8,
)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat(
[
Series([1, 2, 3], name="C"),
Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
],
axis=1,
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse)
expected = DataFrame(
{
"B": ["b", "b", "c"],
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep="..", sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"A..a": [1, 0, 1],
"A..b": [0, 1, 0],
"B..b": [1, 1, 0],
"B..c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse)
expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"})
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=["too few"], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=["bad"], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {"A": "from_A", "B": "from_B"}
df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
}
)
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index(
axis=1
)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_a": arr([1, 0, 1, 0], dtype=typ),
"A_b": arr([0, 1, 0, 0], dtype=typ),
"A_nan": arr([0, 0, 0, 1], dtype=typ),
"B_b": arr([1, 1, 0, 0], dtype=typ),
"B_c": arr([0, 0, 1, 0], dtype=typ),
"B_nan": arr([0, 0, 0, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
"cat_x": arr([1, 0, 0], dtype=typ),
"cat_y": arr([0, 1, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"get_dummies_kwargs,expected",
[
(
{"data": DataFrame({"ä": ["a"]})},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["ä"]})},
DataFrame({"x_ä": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
DataFrame({"xäa": [1]}, dtype=np.uint8),
),
],
)
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list("aaa")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
expected = DataFrame(index=list("ABC"))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ["a", "b", np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({"b": [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse)
exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=np.uint8).reindex(
["b", np.nan], axis=1
)
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies(
[np.nan], dummy_na=True, drop_first=True, sparse=sparse
)
exp_just_na = DataFrame(index=np.arange(1))
tm.assert_frame_equal(res_just_na, exp_just_na)
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
        # this is an oddity: the NaN really should be float64, but the cast
        # to float32 does not fail, so match Stata's float type here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
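        # the .dta format has no 64-bit integer type, so int64 round-trips as float64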
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
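            # round-tripping promotes types whose values collide with Stata's reserved
            # missing-value codes or exceed .dta limits: s1 (int8, 127) -> int16,
            # s2 (int16, 32767) -> int32, s3 (int64) -> float64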
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
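        # bool and unsigned columns are written with the smallest signed type that can
        # hold the observed values; uint32 values beyond the int32 range become float64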
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
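            # a string column 's<N>' should be stored with minimal width N:
            # both the %Ns display format and the type code equal N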
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = StataReader(path).VALID_RANGE
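        # Stata reserves 27 missing values per numeric type: '.' plus '.a' through '.z'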
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0,27):
val = StataMissingValue(offset+1+i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
        # Test extremes for doubles
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data,columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
        # Fixes for weekly, quarterly, half-yearly and yearly dates
expected[2][2] = datetime(9999,12,24)
expected[2][3] = datetime(9999,12,1)
expected[2][4] = datetime(9999,10,1)
expected[2][5] = datetime(9999,7,1)
expected[4][2] = datetime(2262,4,16)
expected[4][3] = expected[4][4] = datetime(2262,4,1)
expected[4][5] = expected[4][6] = datetime(2262,1,1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677,10,1)
expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
date_conversion = dict((c, c[-2:]) for c in columns)
        # equivalent dict comprehension: {c: c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category') for col in original], axis=1)
expected['incompletely_labeled'] = expected['incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
tm.assert_equal(len(w), 1) # should get a warning for mixed content
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_categorical_order(self):
# Directly construct using expected codes
        # Tuple format: is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c', 'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd', 'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', ['a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', ['a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'], np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_items(cols)
# Read with and with out categoricals, ensure order is identical
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed_115[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed_115[col].cat.categories)
def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
parsed_115 = parsed_115.sort("srh")
parsed_117 = parsed_117.sort("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
expected = pd.Series(pd.Categorical.from_codes(codes=codes,
categories=categories))
| tm.assert_series_equal(expected, parsed_115["srh"]) | pandas.util.testing.assert_series_equal |
from datetime import datetime
import pandas as pd
import robin_stocks as r
import time
import logging
ETH_ID = "76637d50-c702-4ed1-bcb5-5b0732a81f48"
log = logging.getLogger(__name__)
class LstmDataManager:
data: pd.DataFrame = None
# Used for simulation only
end_index = None
def __init__(self, simulation_mode=False):
log.info(f"Initializing LstmDataManager. Simulation mode: {simulation_mode}")
log.info(f"Loading data from dump file...")
old_data = pd.read_csv("data/ETH.csv")
# old_data.columns = ["time", "price"]
self.data = self.averageAndRename(old_data)
# self.data = old_data
log.info(
f"Load data complete: {len(self.data)} rows loaded. Latest row time: {self.data.time.iat[-1]}"
)
if simulation_mode:
self.end_index = 500 # start with 500 samples
@staticmethod
def averageAndRename(begin_high_low_dataFrame: pd.DataFrame):
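        """Average the high/low prices into a single mid price, keep the begins_at timestamps, and use them as the index."""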
new_dataFrame = pd.DataFrame()
new_dataFrame["price"] = begin_high_low_dataFrame[
["high_price", "low_price"]
].mean(axis=1)
new_dataFrame["time"] = begin_high_low_dataFrame["begins_at"]
new_dataFrame = new_dataFrame.set_index(
pd.DatetimeIndex(begin_high_low_dataFrame["begins_at"].values)
)
return new_dataFrame
@staticmethod
def appendFromApi(current_list):
def getHourlyHistory() -> dict:
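            """Fetch the past hour of 15-second ETH candles, retrying every 60 s until valid data is returned."""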
url = r.urls.crypto_historical(ETH_ID)
payload = {"interval": "15second", "span": "hour", "bounds": "24_7"}
data = None
try:
data = r.helper.request_get(url, "regular", payload)
            except Exception:
log.error("Request call for Crypto historical data failed")
if (
(not data)
or (not type(data) is dict)
or (data.get("data_points") == None)
):
log.error(
f"Unable to fetch data from robinhood: Trying again in 60 seconds."
)
time.sleep(60)
return getHourlyHistory()
else:
return data["data_points"]
# raw_data = r.crypto.get_crypto_historicals("ETH", interval='15second', span='hour', bounds='24_7', info=None)
log.info("Getting past hourly data...")
raw_data = getHourlyHistory()
data = | pd.DataFrame(raw_data) | pandas.DataFrame |
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
    chromosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
    return chromosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
                Name: Position, dtype: int64; marker position in bp
                Name: group, dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
        Returns
        -------
        None; the checked input data are stored as class attributes
        (gmap, meff, gmat, group, indwt).
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str) # no of inds
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
if not (mpx == np.sort(sorted(mpx))).any():
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
mposunit : string
        A string containing "cM" or "reco".
method : int
An integer with a value of 1 for Bonk et al.'s approach or
        2 for Santos et al.'s approach
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
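    # genotype codes follow fnrep2: 3 and 4 are the two phased heterozygote
    # configurations and get +effect/-effect; homozygotes (1, 2) contribute 0
    # to the Mendelian sampling term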
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
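    # for breeding values, homozygote codes 1 and 2 get +effect/-effect and
    # heterozygotes (3, 4) contribute 0 under this coding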
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
tnn = np.zeros((notr, notr), 'U20')
tnn = np.chararray(tnn.shape, itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
aaa = np.array(aaa)
if len(aaa) != len(sub_id):
sys.exit("Some individual ID could not be found in group data")
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
        msvmsc[i, 0] = mscov[0, 0]  # single-trait case: take the scalar out of the 1x1 matrix
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr+1, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
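            # last row: aggregate-genotype effects as the index-weighted sum of the trait rows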
temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
tnames = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, tnames).decode('utf-8')
msvmsc.columns = colnam
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
"""
Derive Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
progress : bool, optional; print progress of the function if True
Returns
-------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
Note: If sub_id is None, Mendelian (co-)variance will be estimated for
all individuals. Otherwise, Mendelian (co-)variance will be estimated for
the individuals in sub_id
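    Example
    -------
    Illustrative sketch (not a doctest). It assumes the bundled example map
    stores marker positions in cM and uses unit index weights::

        gmap, meff, gmat, group, ped = example_data()
        info = Datacheck(gmap, meff, gmat, group,
                         indwt=[1.0] * (meff.shape[1] - 1))
        covmat = popcovmat(info, mposunit="cM", method=1)
        msvmsc = msvarcov_g(info, covmat, sub_id=None)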
"""
notr = info.meff.columns.size
if notr == 1:
msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
elif notr > 1:
msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
return msvmsc
def array2sym(array):
"""Convert array to stdized symm mat, and back to array without diags."""
dfmsize = array.size
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
iii, jjj = np.tril_indices(notr)
mat = np.empty((notr, notr), float)
mat[iii, jjj], mat[jjj, iii] = array, array
mat = np.array(mat)
mat1 = cov2corr(mat)
return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
"""
Standardize Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
created using msvarcov_g function
Returns
-------
dfcor : pandas.DataFrame
containing standardized Mendelian sampling (co)variance
"""
if msvmsc.columns.size == 3:
sys.exit("Correlation cannot be derived for a single trait")
dfm = msvmsc.iloc[:, 2:msvmsc.shape[1]] # exclude ID and group
dfmsize = dfm.shape[1]
# derive number of traits
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
# standardize covariance between traits
dfcor = dfm.apply(array2sym, axis=1)
# extract column names
listnames = dfm.columns.tolist()
cnames = [x for x in listnames if "_" in x]
# convert pd.series of list to data frame
dfcor = pd.DataFrame.from_dict(dict(zip(dfcor.index, dfcor.values))).T
dfcor.columns = cnames
# insert ID and group info
dfcor = [pd.DataFrame(msvmsc.iloc[:, 0:2]), dfcor] # add ID and GRP
dfcor = pd.concat(dfcor, axis=1)
return dfcor
def calcgbv(info, sub_id):
"""Calculate breeding values for each trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
no_individuals = matsub.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float) # type spec for numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, 0] = matrix_me.sum(axis=1) # sum all effects
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float) # type spec 4 numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, i] = matrix_me.sum(axis=1) # sum all effects for each trait
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i] # Agg gen
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "ID", idn, True) # insert ID
gbv.insert(1, "Group", groupsex, True) # insert group
return gbv
def calcprob(info, msvmsc, thresh):
"""Calculate the probability of breeding top individuals."""
aaa = subindcheck(info, pd.DataFrame(msvmsc.iloc[:, 0]))
gbvall = calcgbv(info, None) # calc GEBV for all inds used by thresh
gbv = gbvall.iloc[aaa, :].reset_index(drop=True) # GEBV matching msvmsc
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
probdf = np.zeros((no_individuals, notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh) # threshold
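        # PBTI: probability mass above the population threshold for a normal centred
        # at the GEBV with SD = sqrt(Mendelian sampling variance)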
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh) # threshold
probdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh) # threshold
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
msvmsc.iloc[:, (t_ind[i])+2]))
probdf[:, i] = np.nan_to_num(probdf[:, i]) # convert Inf to zero
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+2)], scale=np.sqrt(
msvmsc["AG"]))
probdf[:, notr] = np.nan_to_num(probdf[:, notr]) # Agg
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = [pd.DataFrame(gbv.iloc[:, 0:2]), probdf] # add ID and GRP
probdf = pd.concat(probdf, axis=1)
return probdf
def calcindex(info, msvmsc, const):
"""Calculate the index if constant is known."""
sub_id = pd.DataFrame(msvmsc.iloc[:, 0])
gbv = calcgbv(info, sub_id) # calc GEBV
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((no_individuals, notr))
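        # index = GEBV/2 (expected transmitted value) + constant * Mendelian sampling SD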
indexdf[:, 0] = (gbv.iloc[:, (0+2)]/2) + np.sqrt(
msvmsc.iloc[:, 0+2])*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
indexdf[:, i] = (gbv.iloc[:, (i+2)]/2) + np.sqrt(
msvmsc.iloc[:, (t_ind[i]+2)])*const
indexdf[:, notr] = (gbv.iloc[:, (notr+2)]/2) + np.sqrt(
msvmsc["AG"])*const
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = [pd.DataFrame(gbv.iloc[:, 0:2]), indexdf] # add ID and GRP
indexdf = pd.concat(indexdf, axis=1)
return indexdf
def selstrat_g(selstrat, info, sub_id, msvmsc, throrconst):
"""
Calc selection criteria (GEBV, PBTI, or index using gametic approach.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
top 5% of GEBV. If selstrat is index, throrconst is a constant.
If selstrat is GEBV, throrconst can be any random value.
Returns
-------
data : pandas.DataFrame
Index: RangeIndex
Columns:
ID, Group, trait names and Aggregate Breeding Value (ABV)
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_id is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
criterion will be estimated for all individuals in msvmsc data frame.
"""
if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
sys.exit("Provide Mendelian (co-)variance dataframe: 'msvmsc'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
sys.exit("Provide value for throrconst parameter")
if selstrat not in ('GEBV', 'gebv', 'PBTI', 'pbti', 'index', 'INDEX'):
sys.exit("selection strategy should be one of GEBV, PBTI or INDEX")
if selstrat in ('GEBV', 'gebv'):
data = calcgbv(info, sub_id)
elif selstrat in ('PBTI', 'pbti'):
if throrconst > 1 or throrconst < 0:
sys.exit("value must be in the range of 0 and 1")
data = calcprob(info, msvmsc, throrconst)
elif selstrat in ('index', 'INDEX'):
data = calcindex(info, msvmsc, throrconst)
return data
def cov2corr(cov):
"""Convert covariance to correlation matrix."""
cov = np.asanyarray(cov)
std_ = np.sqrt(np.diag(cov))
with np.errstate(invalid='ignore'):
corr = cov / np.outer(std_, std_)
return corr
def aggen(us_ind, no_markers, slst, indwt):
"""Set up additive effects matrix of aggregate genotype."""
mmfinal = np.empty((len(us_ind), no_markers))
xxx = 0
for iii in us_ind:
tmpmt1 = np.array([slst[0][trt][iii, :] for trt in range(indwt.size)])
mmfinal[xxx, :] = np.matmul(indwt.transpose(), tmpmt1)
xxx = xxx + 1
return mmfinal
def chr_int(xxxxx):
"""Format chromomosome of interest parameter."""
if 'all' in xxxxx:
xxxxx = 'all'
elif 'none' in xxxxx:
xxxxx = 'none'
else:
xxxxx = np.array([int(i) for i in xxxxx])
return xxxxx
def writechr(covtmpx, chrinterest, chrm, trtnam, probx, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
def writechrunspec(covtmpx, chrinterest, chrm, trtnam, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
def grtonum(numnx):
"""Map chracters to numeric (0-no of groups)."""
numnx = numnx.reset_index(drop=True)
probn = pd.unique(numnx).tolist()
alt_no = np.arange(0, len(probn), 1)
noli = numnx.tolist()
numnx = np.array(list(map(dict(zip(probn, alt_no)).get, noli, noli)))
return numnx, probn
def datret(info, rw_nms, pfnp, us_ind, slist, covmat, cov_indxx, stdsim,
progress):
"""Return sim mat based on aggregate genotypes."""
snpindexxxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
if info.meff.shape[1] == 1 and not stdsim:
mat = cov_indxx
elif info.meff.shape[1] == 1 and stdsim:
mat = cov2corr(cov_indxx)
elif info.meff.shape[1] > 1:
if info.gmap.shape[1]-3 > 1:
rw_nms = pd.DataFrame(rw_nms)
rw_nms.to_csv(f"order of inds in mat grp {pfnp}.csv", index=False)
if progress:
print('Creating similarity matrix based on aggregate genotype')
progr(0, max(pd.unique(info.gmap.iloc[:, 0])))
tmpmt1 = aggen(us_ind, info.gmap.shape[0], slist, info.indwt)
# stores ABV covariance btw inds
mat = np.zeros((len(us_ind), len(us_ind)))
# loop over chromososomes
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(snpindexxxx[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[0][chrm-1]))
else:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[pfnp][chrm-1]))
mat = mat + covtmpx
if progress:
progr(chrm, max( | pd.unique(info.gmap.iloc[:, 0]) | pandas.unique |
import pandas as pd
import sasoptpy as so
import requests
from subprocess import Popen, DEVNULL
# Solves the pre-season optimization problem
def get_data():
r = requests.get('https://fantasy.premierleague.com/api/bootstrap-static/')
fpl_data = r.json()
element_data = pd.DataFrame(fpl_data['elements'])
team_data = pd.DataFrame(fpl_data['teams'])
elements_team = | pd.merge(element_data, team_data, left_on='team', right_on='id') | pandas.merge |
"""
This file is part of Cytometer
Copyright 2021 Medical Research Council
SPDX-License-Identifier: Apache-2.0
Author: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
# imports for sped up hdquantiles_sd
from numpy import float_, int_, ndarray
import numpy.ma as ma
from scipy.stats.distributions import norm, beta, t, binom
def pval_to_asterisk(pval, brackets=True):
"""
    convert a p-value scalar or an array/dataframe of p-values to significance strings 'ns', '*', '**', etc.
:param pval: scalar, array or dataframe of p-values
:return: scalar or array with the same shape as the input, where each p-value is converted to its significance
string
"""
def translate(pval, brackets=True):
if brackets:
lb = '('
rb = ')'
else:
lb = ''
rb = ''
if (pval < 0.0) | np.isnan(pval):
return 'nan'
elif pval > 0.05:
return lb + 'ns' + rb
elif pval > 0.01:
return lb + '*' + rb
elif pval > 0.001:
return lb + '**' + rb
elif pval > 0.0001:
return lb + '***' + rb
else:
return lb + '****' + rb
if np.isscalar(pval):
return translate(pval, brackets)
else:
return np.vectorize(translate)(pval, brackets)
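# Illustrative examples: pval_to_asterisk(0.03) -> '(*)';
# pval_to_asterisk(0.2, brackets=False) -> 'ns'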
def plot_pvals(pvals, xs, ys, ylim=None, corrected_pvals=None, df_pval_location='above', color=None):
if ylim is None:
ylim = plt.gca().get_ylim()
offset = (np.max(ylim) - np.min(ylim)) / 40 # vertical offset between data point and bottom asterisk
if corrected_pvals is None:
corrected_pvals = np.ones(shape=np.array(pvals).shape, dtype=np.float32)
h = []
for pval, corrected_pval, x, y in zip(pvals, corrected_pvals, xs, ys):
str = pval_to_asterisk(pval, brackets=False).replace('*', '∗')
corrected_str = pval_to_asterisk(corrected_pval, brackets=False).replace('*', '∗')
if str == 'ns':
fontsize = 10
else:
fontsize = 7
if corrected_str == 'ns': # we don't plot 'ns' in corrected p-values, to avoid having asterisks overlapped by 'ns'
corrected_str = ''
str = str.replace(corrected_str, '⊛'*len(corrected_str), 1)
        rotation = 90  # same rotation regardless of significance
if df_pval_location == 'above':
y += offset
va = 'bottom'
else:
y -= 2*offset
va = 'top'
h.append(plt.text(x, y + offset, str, ha='center', va=va, color=color, rotation=rotation, fontsize=fontsize))
return h
def plot_model_coeff(x, df_coeff, df_ci_lo, df_ci_hi, df_pval, ylim=None, df_corrected_pval=None, color=None,
df_pval_location='above', label=None):
if color is None:
# next colour to be used, according to the colour iterator
color = next(plt.gca()._get_lines.prop_cycler)['color']
plt.plot(x, df_coeff, color=color, label=label)
plt.fill_between(x, df_ci_lo, df_ci_hi, alpha=0.5, color=color)
h = plot_pvals(df_pval, x, df_ci_hi, corrected_pvals=df_corrected_pval, ylim=ylim, color=color,
df_pval_location=df_pval_location)
return h
def plot_model_coeff_compare2(x, df_coeff_1, df_ci_lo_1, df_ci_hi_1, df_pval_1,
df_coeff_2, df_ci_lo_2, df_ci_hi_2, df_pval_2,
ylim=None,
df_corrected_pval_1=None, df_corrected_pval_2=None,
color_1=None, color_2=None,
label_1=None, label_2=None):
if color_1 is None:
# next colour to be used, according to the colour iterator
color_1 = next(plt.gca()._get_lines.prop_cycler)['color']
if color_2 is None:
color_2 = next(plt.gca()._get_lines.prop_cycler)['color']
dx = (np.max(x) - np.min(x)) / 60
plt.plot(x, df_coeff_1, color=color_1, label=label_1)
plt.plot(x, df_coeff_2, color=color_2, label=label_2)
plt.fill_between(x, df_ci_lo_1, df_ci_hi_1, alpha=0.5, color=color_1)
plt.fill_between(x, df_ci_lo_2, df_ci_hi_2, alpha=0.5, color=color_2)
y = np.maximum(df_ci_hi_1, df_ci_hi_2)
h1 = plot_pvals(df_pval_1, x - dx, y, corrected_pvals=df_corrected_pval_1, ylim=ylim, color=color_1)
h2 = plot_pvals(df_pval_2, x + dx, y, corrected_pvals=df_corrected_pval_2, ylim=ylim, color=color_2)
return h1, h2
def models_coeff_ci_pval(models, extra_hypotheses=None, model_names=None):
"""
For convenience, extract betas (coefficients), confidence intervals and p-values from a statsmodels model. Each one
corresponds to one t-test of a hypothesis (where the hypothesis is that the coefficient ~= 0).
This function also allows to add extra hypotheses (contrasts) to the model. For example, that the sum of two of
the model's coefficients is ~= 0.
* Example of a model:
import statsmodels.api as sm
model = sm.RLM.from_formula('weight ~ C(sex)', data=df, M=sm.robust.norms.HuberT()).fit()
* Example of extra hypotheses:
'Intercept + C(ko_parent)[T.MAT], C(ko_parent)[T.MAT]=C(ko_parent)[T.FKO]'
:param models: List of statsmodels models (see example above).
:param extra_hypotheses: (def None) String with new hypotheses to t-test in the model (see example above).
:param model_names: (def None) List of strings with the name of each model. This will become the index in each
output dataframe.
:return: df_coeff, df_ci_lo, df_ci_hi, df_pval
"""
if extra_hypotheses is not None:
hypotheses_labels = extra_hypotheses.replace(' ', '').split(',')
df_coeff_tot = | pd.DataFrame() | pandas.DataFrame |
import itertools as itt
import pathlib as pl
from configparser import ConfigParser
import joblib as jl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as sst
import seaborn as sns
from statannot import add_stat_annotation
from src.visualization import fancy_plots as fplt
from src.data.cache import set_name
"""
2020-05-??
Used an exponential decay to model the evolution of contextual effects over time. Here the fitted parameters (tau and
y-intercept r0) are compared across different treatments (probes, transition pairs), between single-cell and population
analyses (dPCA, LDA), and finally between fitting the dprime or its profile of significance.
tau is selected from the fitted significance profile, and r0 from the fitted dprime.
"""
config = ConfigParser()
config.read_file(open(pl.Path(__file__).parents[2] / 'config' / 'settings.ini'))
# analysis should be created and cached with trp_batch_dprime.py beforehand, using the same meta parameters
meta = {'reliability': 0.1, # r value
'smoothing_window': 0, # ms
'raster_fs': 30,
'transitions': ['silence', 'continuous', 'similar', 'sharp'],
'montecarlo': 1000,
'zscore': True,
'dprime_absolute': None}
# transferable plotting parameters
plt.rcParams['svg.fonttype'] = 'none'
sup_title_size = 30
sub_title_size = 20
ax_lab_size = 15
ax_val_size = 11
full_screen = [19.2, 9.83]
sns.set_style("ticks")
########################################################################################################################
########################################################################################################################
# data frame containing all the important summary data, i.e. exponential decay fits for dprime and significance, for
# all combinations of transition pairs, and probes, for the means across probes, transitions pairs or for both, and
# for the single cell analysis or the dPCA projections
summary_DF_file = pl.Path(config['paths']['analysis_cache']) / 'DF_summary' / set_name(meta)
print('loading cached summary DataFrame')
DF = jl.load(summary_DF_file)
########################################################################################################################
# SC
########################################################################################################################
# compare tau between different probe means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 1000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['cellid', 'probe', 'value']]
pivoted = filtered.pivot(index='cellid', columns='probe', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='probe')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='probe', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='probe', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# no significant comparisons
box_pairs = list(itt.combinations(filtered.probe.unique(), 2))
# box_pairs = [('probe_2', 'probe_3'), ('probe_3', 'probe_5')]
stat_results = add_stat_annotation(ax, data=molten, x='probe', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary significance-tau comparison between probes'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# compare tau between different transition pair means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 1000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['cellid', 'transition_pair', 'value']]
pivoted = filtered.pivot(index='cellid', columns='transition_pair', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='transition_pair')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# box_pairs = list(itt.combinations(filtered.transition_pair.unique(), 2))
box_pairs = [('continuous_sharp', 'continuous_similar'), ('continuous_similar', 'silence_continuous'),
('continuous_similar', 'silence_sharp'), ('continuous_similar', 'silence_similar'),
('continuous_similar', 'similar_sharp')]
stat_results = add_stat_annotation(ax, data=molten, x='transition_pair', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary significance-tau comparison between transitions'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# compare r0 between different probe means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['cellid', 'probe', 'value']]
pivoted = filtered.pivot(index='cellid', columns='probe', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='probe')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='probe', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='probe', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
box_pairs = list(itt.combinations(filtered.probe.unique(), 2))
# box_pairs = [('probe_2', 'probe_3')]
stat_results = add_stat_annotation(ax, data=molten, x='probe', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary dprime-r0 comparison between probes'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# compare r0 between different transition pair means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['cellid', 'transition_pair', 'value']]
pivoted = filtered.pivot(index='cellid', columns='transition_pair', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='transition_pair')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
box_pairs = list(itt.combinations(filtered.transition_pair.unique(), 2))
# box_pairs = [('continuous_sharp', 'continuous_similar'), ('continuous_sharp', 'silence_continuous'),
# ('continuous_sharp', 'silence_sharp'), ('continuous_sharp', 'silence_similar'),
# ('continuous_similar', 'silence_continuous'), ('continuous_similar', 'silence_sharp'),
# ('continuous_similar', 'silence_similar'), ('continuous_similar', 'similar_sharp'),
# ('silence_similar', 'similar_sharp')]
stat_results = add_stat_annotation(ax, data=molten, x='transition_pair', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'amplitude (z-score)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary dprime-r0 comparison between transitions'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# Distribution of cells in r0 tau space
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = | pd.concat([R0, Tau]) | pandas.concat |
#!/usr/bin/env python
"""Tests for `specl` package."""
from functools import reduce
import os
import pytest
from unittest.mock import patch, mock_open
import numpy as np
import pandas as pd
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from hypothesis.extra import pandas as hpd
from hypothesis.extra.pandas import columns, data_frames
from specl.specl import read_spec, read_data, build_kwargs_read, rename_columns, dropna_rows, write_data
from tests.fixtures import empty_csv, empty_spec, basic_spec_0, basic_spec_dict, basic_spec, write_funcs
from tests.strategies import gen_columns_and_subset, gen_rando_dataframe, gen_mixed_type_dataset
def write_dataframe_to_tmpdir(tmpdir, write_funcs, df, ext):
with(tmpdir.make_numbered_dir().join(str(f'test{ext}'))) as tmp_file:
write_funcs[ext](df, tmp_file.strpath)
return tmp_file.strpath
def test_that_load_spec_returns_empty_dict_for_empty_spec(empty_spec):
with patch('builtins.open', new_callable=mock_open, read_data=empty_spec):
spec = read_spec('fake/file.yaml')
assert spec == {}
def test_that_load_spec_returns_dict_for_basic_spec(basic_spec):
""" Passes the YAML contents of the basic_spec fixture to the read_spec function
and confirms that the resulting dictionary has the expected shape and content.
"""
with patch('builtins.open', new_callable=mock_open, read_data=basic_spec):
spec = read_spec('fake/file.yaml')
assert spec != {}, "Spec should not be empty for valid YAML file."
assert [*spec] == ['input', 'output'], "Spec should have top-level input and output keys."
assert spec['input']['file'] == 'source.csv', "Spec should have an input file defined."
assert spec['output']['file'] == 'out.csv', "Spec should have an output file defined."
assert list(spec['input']['columns'].keys()) == ['column_a', 'column_b', 'column_c', 'column_d']
def test_that_load_spec_raises_valueerror_for_invalid_spec(basic_spec_0):
with pytest.raises(ValueError) as spec_error:
with patch('builtins.open', new_callable=mock_open, read_data=basic_spec_0):
spec = read_spec('fake/file.yaml')
assert "invalid spec" in str(spec_error.value).lower()
# @settings(deadline=None)
# @given(data_frames(columns=columns("A B C".split(), dtype=int), index=hpd.range_indexes()),
# sampled_from(['.csv', '.xls', '.xlsx', '.parquet']))
# def test_that_read_data_returns_data_frame(tmpdir, write_funcs, basic_spec_dict, df, ext):
# """Given a Hypothesis DataFrame, save it as a file of the sampled type,
# and test the reading that file into a Pandas DataFrame works as expected."""
#
# expected = df.shape[1]
#
# # using make_numbered_dir to avoid path collisions when running test for each
# # hypothesis-generated data frame.
# # p = tmpdir.make_numbered_dir().join(str(f'test{ext}'))
# # write_funcs[ext](df, p.strpath)
# tmp_file_path = write_dataframe_to_tmpdir(tmpdir, write_funcs, df, ext)
# spec = {'input': {'file': tmp_file_path}}
# spec, df_in = read_data(spec)
#
# # TODO: Figure out why hypothesis DF shape not equal to Pandas when read from csv
# assert df_in.shape[1] >= expected
# @settings(deadline=None)
# @given(gen_columns_and_subset(), sampled_from(['.csv', '.xls', '.xlsx', '.parquet']))
# def test_that_read_function_called_with_columns_specified(tmpdir, write_funcs, basic_spec_dict, df_config, ext):
# hdf, keeper_cols = df_config
# tmp_file_path = write_dataframe_to_tmpdir(tmpdir, write_funcs, hdf, ext)
# col_specs = map(lambda c: {c: {'data_type': 'int'}}, keeper_cols)
# basic_spec_dict['input']['file'] = tmp_file_path
# basic_spec_dict['input']['columns'] = {}
# # bogus, i know
# for col in col_specs:
# col_name = list(col.keys())[0]
# col_spec = list(col.values())[0]
# basic_spec_dict['input']['columns'][col_name] = col_spec
# spec, df = read_data(basic_spec_dict)
# assert list(df.columns.values).sort() == list(keeper_cols).sort()
def test_that_build_kwargs_adds_columns_arg(basic_spec_dict):
kwargs = build_kwargs_read(basic_spec_dict, '.xlsx')
assert 'usecols' in list(kwargs.keys())
def test_that_build_kwargs_adds_columns_arg_based_on_ext(basic_spec_dict):
kwargs = build_kwargs_read(basic_spec_dict, '.parquet')
assert 'columns' in list(kwargs.keys())
def test_that_build_kwargs_does_not_add_columns_arg_when_empty():
spec = {'input': {'file': 'foo.txt'}}
kwargs = build_kwargs_read(spec, '.csv')
assert 'usecols' not in list(kwargs.keys())
def test_that_columns_get_renamed_per_spec(basic_spec_dict):
basic_dataframe = pd.DataFrame(data={'A': [1, 2], 'B': [3, 4]})
basic_spec_dict['input']['columns'] = {'A': {'data_type': 'int', 'name': 'foo'},
'B': {'date_type': 'int', 'name': 'bar'}}
spec, renamed_df = rename_columns(basic_spec_dict, basic_dataframe)
assert spec == basic_spec_dict
assert list(renamed_df.columns) == ['foo', 'bar']
# @given(gen_rando_dataframe())
# def test_that_generated_columns_get_renamed_per_spec(basic_spec_dict, hdf):
# rename_col_config = map(lambda x: {x: {'data_type': 'int', 'name': x.upper()}}, list(hdf.columns))
# basic_spec_dict['input']['columns'] = reduce(lambda config, col: config.update(col) or config,
# list(rename_col_config))
# spec, renamed_df = rename_columns(basic_spec_dict, hdf)
# assert spec == basic_spec_dict
# assert list(renamed_df.columns) == list(map(lambda col_name: col_name.upper(), list(hdf.columns)))
def test_that_drop_na_works_for_any(basic_spec_dict):
basic_dataframe = | pd.DataFrame(data={'A': [1, np.nan, 5], 'B': [3, np.nan, np.nan]}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
# In[2]:
# Load the training, additional, confidence, and test data
train_data = pd.read_csv('training.csv')
test_data = pd.read_csv('testing.csv')
additional_data = pd.read_csv('additional_training.csv')
confidence = pd.read_csv('annotation_confidence.csv')
# In[3]:
# Look at their shapes
print('Train data: ',train_data.shape)
print('Test data: ',test_data.shape)
print('Additional data: ',additional_data.shape)
print('Confidence data: ',confidence.shape)
# In[4]:
# Visualise some of the train_data
train_data.tail(2)
# In[5]:
# Visualise some of the additional_data
additional_data.tail(2)
# In[6]:
# Make sure the additional and training data are of the same type so we can add them together
print(train_data.dtypes)
print(additional_data.dtypes)
# In[649]:
# Since they're the same types, append them together.
full_train = train_data.append(additional_data)
print(full_train.shape)
full_train
# In[650]:
# Fill the NaN values
full_train_ = full_train.fillna(full_train.mean())
# In[651]:
np.where(np.isnan(full_train_))
# In[652]:
full_train_['confidence'] = confidence['confidence']
# In[653]:
predictions = full_train_.prediction
confidences = full_train_.confidence
train = full_train_.drop('prediction',axis=1)
train = train.drop('confidence',axis=1)
train.tail(1)
# In[654]:
from sklearn.model_selection import train_test_split
x_train,x_val = train_test_split(full_train_,train_size=0.7)
# In[655]:
np.where(np.isnan(x_train))
# In[656]:
np.where(np.isnan(x_val))
# In[657]:
train_pred = x_train.prediction
val_pred = x_val.prediction
train_conf = x_train.confidence
val_conf = x_val.confidence
x_train = x_train.drop('prediction',axis=1)
x_train = x_train.drop('confidence',axis=1)
x_val = x_val.drop('prediction',axis=1)
x_val = x_val.drop('confidence',axis=1)
# In[721]:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train)
x_train_scaled = scaler.transform(x_train)
x_val_scaled = scaler.transform(x_val)
test_scaled = scaler.transform(test_data)
# In[755]:
from sklearn.decomposition import PCA
pca2 = PCA().fit(x_train_scaled)
plt.plot(np.cumsum(pca2.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
# In[722]:
pca = PCA(1500)
pca.fit(x_train_scaled)
pcaTrain = pca.transform(x_train_scaled)
pcaVal = pca.transform(x_val_scaled)
pcaTest = pca.transform(test_scaled)
# In[723]:
train_2 = scaler.fit_transform(train)
train_2 = pca.transform(train_2)
# In[724]:
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
logreg2 = LogisticRegression(C=1)
logreg2.fit(pcaTrain, train_pred ,sample_weight=train_conf)
# In[725]:
valPred = logreg2.predict(pcaVal)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg2.score(pcaVal, val_pred)))
# In[726]:
np.where(valPred==0)
# In[727]:
np.where(val_pred==0)
# In[715]:
test_y = logreg2.predict(pcaTest)
# In[716]:
np.where(test_y==0)
# In[ ]:
# In[736]:
log_df = pd.DataFrame(test_y)
log_df.columns = ['prediction']
predds= | pd.DataFrame({'ID': test_data.ID, 'prediction': log_df['prediction']}) | pandas.DataFrame |
import os
import warnings
import pandas as pd
from .. import make_canon_dataset
TEST_FP = os.path.dirname(os.path.abspath(__file__))
DATA_FP = os.path.join(TEST_FP, 'data', 'processed')
def test_read_records(tmpdir):
result = make_canon_dataset.read_records(
os.path.join(DATA_FP, 'crash_joined.json'),
'near_id',
['bike', 'pedestrian', 'vehicle']
)
expected = pd.DataFrame({
'near_id': [1, 2, 3, '000', '002', '003', '004', '005', '007', '008'],
'crash': [2, 18, 2, 5, 3, 14, 2, 11, 1, 4],
'bike': [0, 3, 0, 0, 1, 1, 0, 3, 0, 1],
'pedestrian': [0, 3, 1, 1, 0, 0, 1, 0, 0, 0],
'vehicle': [2, 12, 1, 4, 2, 13, 1, 8, 1, 3]
})
| pd.testing.assert_frame_equal(result, expected, check_dtype=False) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
# # Prepare SpaceNet 7 Data for Model Testing
#
# This Python script does the data processing steps (but not the visualization steps) from the ../notebooks/sn7_data_prep.ipynb notebook. It takes the input file location as an argument.
# In[ ]:
import multiprocessing
import pandas as pd
import numpy as np
import skimage
import gdal
import sys
import os
import matplotlib as mpl
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
plt.rcParams.update({'font.size': 16})
mpl.rcParams['figure.dpi'] = 300
import solaris as sol
from solaris.raster.image import create_multiband_geotiff
from solaris.utils.core import _check_gdf_load
# import from data_prep_funcs
module_path = os.path.abspath(os.path.join('../src/'))
if module_path not in sys.path:
sys.path.append(module_path)
from sn7_baseline_prep_funcs import map_wrapper, make_geojsons_and_masks
# In[ ]:
# Dataset location
root_dir = sys.argv[1]
outp_dir = sys.argv[2]
# In[ ]:
# Make dataframe csvs for test
out_dir = os.path.join(outp_dir, '../csvs/')
pops = ['test']
os.makedirs(out_dir, exist_ok=True)
for pop in pops:
d = root_dir
outpath = os.path.join(out_dir, 'sn7_baseline_' + pop + '_df.csv')
im_list, mask_list = [], []
subdirs = sorted([f for f in os.listdir(d) if os.path.isdir(os.path.join(d, f))])
for subdir in subdirs:
if pop == 'train':
im_files = [os.path.join(root_dir, subdir, 'images_masked', f)
for f in sorted(os.listdir(os.path.join(root_dir, subdir, 'images_masked')))
if f.endswith('.tif') and os.path.exists(os.path.join(d, subdir, 'masks', f.split('.')[0] + '_Buildings.tif'))]
mask_files = [os.path.join(d, subdir, 'masks', f.split('.')[0] + '_Buildings.tif')
for f in sorted(os.listdir(os.path.join(root_dir, subdir, 'images_masked')))
if f.endswith('.tif') and os.path.exists(os.path.join(d, subdir, 'masks', f.split('.')[0] + '_Buildings.tif'))]
im_list.extend(im_files)
mask_list.extend(mask_files)
elif pop == 'test':
im_files = [os.path.join(root_dir, subdir, 'images_masked', f)
for f in sorted(os.listdir(os.path.join(root_dir, subdir, 'images_masked')))
if f.endswith('.tif')]
im_list.extend(im_files)
# save to dataframes
# print("im_list:", im_list)
# print("mask_list:", mask_list)
if pop == 'train':
df = | pd.DataFrame({'image': im_list, 'label': mask_list}) | pandas.DataFrame |
#########
#File: c:\Users\digan\Dropbox\Dynamic_Networks\repos\ScoreDrivenExponentialRandomGraphs\_research\analysis_for_paper_revision\applic_reddit\0_load_reddit_pre_process.py
#Created Date: Tuesday May 4th 2021
#Author: <NAME>, <<EMAIL>>
#-----
#Last Modified: Thursday May 6th 2021 1:46:42 pm
#Modified By: <NAME>
#-----
#Description: preprocess reddit hyperlink data downloaded from https://snap.stanford.edu/data/soc-RedditHyperlinks.html
#-----
########
#%%
import pandas as pd
import numpy as np
import os
import sys
from matplotlib import pyplot as plt
#%%
# load data and rename columns
data_path = "../../../data/reddit_hyperlinks/raw_data/"
os.listdir(data_path)
col_names = ["source", "target", "post_id", "time", "sentiment", "properties"]
df_orig = pd.read_csv(f"{data_path}soc-redditHyperlinks-body.tsv", names = col_names, sep="\t", header = 0)
df_orig["datetime"] = pd.to_datetime(df_orig.time)
df_orig = df_orig.set_index("datetime")
df_orig = df_orig.sort_values(by="datetime")
#%% EDA
# check aggregate number of obs
df_count = df_orig.time.resample("W").count()
plt.plot(df_count, ".")
plt.plot(df_count[df_count==0], ".r")
# number of nodes appearing at least once
| pd.concat((df_orig.source, df_orig.target)) | pandas.concat |
# Script to convert labels into categories based on arguments
import argparse
import numpy as np
import pandas as pd
import csv
# Example command: python3 convertHistoneLabels.py --cell_file data/Cell1.test.csv --output_file Cell1Conv.test.csv
# python3 convertHistoneLabels.py --cell_file data/Cell1.test.csv --output_file temporary_testing.tsv
'''
python3 convertHistoneLabels.py --cell_file data/Cell1.test.csv --output_file C1Test.csv
python3 convertHistoneLabels.py --cell_file data/Cell2.test.csv --output_file C2Test.csv
python3 convertHistoneLabels.py --cell_file data/Cell1.train.csv --output_file C1Train.csv
python3 convertHistoneLabels.py --cell_file data/Cell2.train.csv --output_file C2Train.csv
python3 convertHistoneLabels.py --cell_file data/Cell1.valid.csv --output_file C1Valid.csv
python3 convertHistoneLabels.py --cell_file data/Cell2.valid.csv --output_file C2Valid.csv
python3 transformHistoneData.py --cell1_file data/Cell1.train.csv --cell2_file data/Cell2.train.csv --output_file DiffTrain.csv
python3 transformHistoneData.py --cell1_file data/Cell1.test.csv --cell2_file data/Cell2.test.csv --output_file DiffTest.csv
python3 transformHistoneData.py --cell1_file data/Cell1.valid.csv --cell2_file data/Cell2.valid.csv --output_file DiffValid.csv
'''
def concat_row(row):
return row["hm1"] + row["hm2"] + row["hm3"] + row["hm4"] + row["hm5"]
def transformSingleCellHistones(cell_file, output_file):
histones = None
# Read in histone modification data from text files
with open(cell_file, "r") as text_file:
histones_lines = text_file.read().splitlines()
histones = np.asarray([np.asarray(line.split(",")) for line in histones_lines])
#Convert numpy matrix to pandas dataframe
full_df = pd.DataFrame(histones)
full_df.columns = ["id", "hm1", "hm2", "hm3", "hm4", "hm5"]
full_df["id"] = full_df["id"].str[:15]
histone_df = pd.DataFrame({"hm1": full_df["hm1"], "hm2": full_df["hm2"], "hm3": full_df["hm3"], "hm4": full_df["hm4"], "hm5": full_df["hm5"]})
histone_df = histone_df.apply(pd.to_numeric)
allowed_vals = [0,1]
histone_df[~histone_df.isin(allowed_vals)] = "h"
histone_df = histone_df.replace(0, 'l')
histone_df = histone_df.replace(1, "m")
def concat_row(row):
return row["hm1"] + row["hm2"] + row["hm3"] + row["hm4"] + row["hm5"]
histone_df['concat'] = histone_df.apply (lambda row: concat_row(row), axis=1)
new_df = pd.DataFrame({"id": full_df["id"], "token": histone_df["concat"]})
# combines rows where the cell is the same, removes cell from dataframe
# that way we just have sentences of histone modification tokens for each cell
array_agg = lambda x: ' '.join(x.astype(str))
grp_df = new_df.groupby(['id']).agg({'token': array_agg})
grp_df.to_csv(output_file, index=False, header=False, quoting=csv.QUOTE_NONE)
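# Note (illustrative): each row's five histone values are encoded as a 5-character
# token over {l, m, h} (0 -> 'l', 1 -> 'm', anything else -> 'h'), e.g. (0, 1, 7, 0, 1)
# -> 'lmhlm'; tokens sharing the same id are then joined into one space-separated line.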
def transformDualCellHistones(cell1_file, cell2_file, output_file):
cell1, cell2 = None, None
# Read in histone modification data from text files
with open(cell1_file, "r") as text_file:
cell1_lines = text_file.read().splitlines()
cell1 = np.asarray([np.asarray(line.split(",")) for line in cell1_lines])
with open(cell2_file, "r") as text_file:
cell2_lines = text_file.read().splitlines()
cell2 = np.asarray([np.asarray(line.split(",")) for line in cell2_lines])
#Convert numpy matrix to pandas dataframe
cell1_df = pd.DataFrame(cell1)
cell2_df = pd.DataFrame(cell2)
cell1_df.columns = ["id", "hm1", "hm2", "hm3", "hm4", "hm5"]
cell2_df.columns = ["id", "hm1", "hm2", "hm3", "hm4", "hm5"]
cell1_df["id"] = cell1_df["id"].str[:15]
cell2_df["id"] = cell2_df["id"].str[:15]
cell1_hm_df = pd.DataFrame({"hm1": cell1_df["hm1"], "hm2": cell1_df["hm2"], "hm3": cell1_df["hm3"], "hm4": cell1_df["hm4"], "hm5": cell1_df["hm5"]})
cell2_hm_df = pd.DataFrame({"hm1": cell2_df["hm1"], "hm2": cell2_df["hm2"], "hm3": cell2_df["hm3"], "hm4": cell2_df["hm4"], "hm5": cell2_df["hm5"]})
cell1_hm_df = cell1_hm_df.apply(pd.to_numeric)
cell2_hm_df = cell2_hm_df.apply(pd.to_numeric)
diff_cell = | pd.DataFrame({"hm1": cell1_hm_df["hm1"] - cell2_hm_df["hm1"], "hm2": cell1_hm_df["hm2"] - cell2_hm_df["hm2"], "hm3": cell1_hm_df["hm3"] - cell2_hm_df["hm3"], "hm4": cell1_hm_df["hm4"] - cell2_hm_df["hm4"], "hm5": cell1_hm_df["hm5"] - cell2_hm_df["hm5"]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import tensorflow as tf
import os, pickle
class Reader(object):
def read(self, data_path):
self.read_data()
self.merge_id()
self.add_reverse()
if self.args.reindex:
self.reindex_kb()
self.gen_t_label()
self._ent_num = self._entity_num
self._rel_num = self._relation_num
self._ent_mapping = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._rel_mapping = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._ent_testing = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._rel_testing = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self.gen_filter_mat()
self._kb = self._train_data
return
def read_data(self):
pass
def merge_id(self):
self._train_data['h_id'] = self._e_id[self._train_data.h].values
self._train_data['r_id'] = self._r_id[self._train_data.r].values
self._train_data['t_id'] = self._e_id[self._train_data.t].values
self._test_data['h_id'] = self._e_id[self._test_data.h].values
self._test_data['r_id'] = self._r_id[self._test_data.r].values
self._test_data['t_id'] = self._e_id[self._test_data.t].values
self._valid_data['h_id'] = self._e_id[self._valid_data.h].values
self._valid_data['r_id'] = self._r_id[self._valid_data.r].values
self._valid_data['t_id'] = self._e_id[self._valid_data.t].values
def gen_t_label(self):
full = pd.concat([self._train_data, self._test_data, self._valid_data], ignore_index=True)
f_t_labels = full['t_id'].groupby([full['h_id'], full['r_id']]).apply(lambda x: pd.unique(x.values))
f_t_labels.name = 't_label'
self._test_data = self._test_data.join(f_t_labels, on=['h_id', 'r_id'])
self._valid_data = self._valid_data.join(f_t_labels, on=['h_id', 'r_id'])
def add_reverse(self):
def add_reverse_for_data(data):
reversed_data = data.rename(columns={'h_id': 't_id', 't_id': 'h_id'})
reversed_data.r_id += self._relation_num
data = pd.concat(([data, reversed_data]), ignore_index=True, sort=False)
return data
self._train_data = add_reverse_for_data(self._train_data)
self._test_data = add_reverse_for_data(self._test_data)
self._valid_data = add_reverse_for_data(self._valid_data)
self._relation_num_for_eval = self._relation_num
self._relation_num *= 2
def reindex_kb(self):
train_data = self._train_data
test_data = self._test_data
valid_data = self._valid_data
eids = pd.concat([train_data.h_id, train_data.t_id, self._e_id], ignore_index=True)
tv_eids = np.unique(pd.concat([test_data.h_id, test_data.t_id, valid_data.t_id, valid_data.h_id]))
not_train_eids = tv_eids[~np.in1d(tv_eids, eids)]
rids = pd.concat([train_data.r_id, pd.Series(np.arange(self._relation_num))],ignore_index=True)
def gen_map(eids, rids):
e_num = eids.groupby(eids.values).size().sort_values()[::-1]
not_train = pd.Series(np.zeros_like(not_train_eids), index=not_train_eids)
e_num = pd.concat([e_num, not_train])
r_num = rids.groupby(rids.values).size().sort_values()[::-1]
e_map = pd.Series(range(e_num.shape[0]), index=e_num.index)
r_map = pd.Series(range(r_num.shape[0]), index=r_num.index)
return e_map, r_map
def remap_kb(kb, e_map, r_map):
kb.loc[:, 'h_id'] = e_map.loc[kb.h_id.values].values
kb.loc[:, 'r_id'] = r_map.loc[kb.r_id.values].values
kb.loc[:, 't_id'] = e_map.loc[kb.t_id.values].values
return kb
def remap_id(s, rm):
s = rm.loc[s.values].values
return s
e_map, r_map = gen_map(eids, rids)
self._e_map, self._r_map = e_map, r_map
self._train_data = remap_kb(train_data, e_map, r_map)
self._valid_data = remap_kb(self._valid_data, e_map, r_map)
self._test_data = remap_kb(self._test_data, e_map, r_map)
self._e_id = remap_id(self._e_id, e_map)
self._r_id = remap_id(self._r_id, r_map)
return not_train_eids
def in2d(self, arr1, arr2):
"""Generalisation of numpy.in1d to 2D arrays"""
assert arr1.dtype == arr2.dtype
arr1_view = np.ascontiguousarray(arr1).view(np.dtype((np.void,
arr1.dtype.itemsize * arr1.shape[1])))
arr2_view = np.ascontiguousarray(arr2).view(np.dtype((np.void,
arr2.dtype.itemsize * arr2.shape[1])))
intersected = np.in1d(arr1_view, arr2_view)
return intersected.view(np.bool).reshape(-1)
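    # Illustrative example: in2d(np.array([[1, 2], [3, 4]]), np.array([[3, 4]]))
    # -> array([False,  True])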
def gen_filter_mat(self):
def sp_gen_filter_mat(t_label):
rows, cols = [], []
for row, tails in enumerate(t_label):
rows += list(np.repeat(row, repeats=len(tails)))
cols += list(tails)
return (rows, cols)
self._tail_valid_filter_mat = sp_gen_filter_mat(self._valid_data.t_label)
self._tail_test_filter_mat = sp_gen_filter_mat(self._test_data.t_label)
def gen_label_mat_for_train(self):
def gen_train_relation_label_vac(r):
c = pd.value_counts(r)
values = 1. * c.values / c.sum()
return np.stack([c.index, values], axis=1)
def gen_train_entity_label_vac(r):
indices = np.stack([r.label_id.values, r.values], axis=1)
values = np.ones_like(r.values, dtype=np.int)
return tf.SparseTensor(indices=indices, values=values, dense_shape=[1, self._entity_num])
tr = self._train_data
labels = tr['t_id'].groupby([tr['h_id'], tr['r_id']]).size()
labels = pd.Series(range(labels.shape[0]), index=labels.index)
labels.name = 'label_id'
tr = tr.join(labels, on=['h_id', 'r_id'])
self._train_data = tr
sp_tr = tf.SparseTensor(tr[['label_id', 't_id']].values, np.ones([len(tr)], dtype=np.float32), dense_shape=[len(tr), self._entity_num])
self._label_indices, self._label_values = sp_tr.indices[:], sp_tr.values[:]
class FreeBaseReader(Reader):
def read_data(self):
path = self._options.data_path
tr = pd.read_csv(path + 'train.txt', header=None, sep='\t', names=['h', 't', 'r'])
te = pd.read_csv(path + 'test.txt', header=None, sep='\t', names=['h', 't', 'r'])
val = pd.read_csv(path + 'valid.txt', header=None, sep='\t', names=['h', 't', 'r'])
e_id = pd.read_csv(path + 'entity2id.txt', header=None, sep='\t', names=['e', 'eid'])
e_id = pd.Series(e_id.eid.values, index=e_id.e.values)
r_id = pd.read_csv(path + 'relation2id.txt', header=None, sep='\t', names=['r', 'rid'])
r_id = pd.Series(r_id.rid.values, index=r_id.r.values)
self._entity_num = e_id.shape[0]
self._relation_num = r_id.shape[0]
self._train_data = tr
self._test_data = te
self._valid_data = val
self._e_id, self._r_id = e_id, r_id
class WordNetReader(Reader):
def read_data(self):
path = self._options.data_path
tr = pd.read_csv(path+'train.txt', header=None, sep='\t', names=['h', 'r', 't'])
te = pd.read_csv(path + 'test.txt', header=None, sep='\t', names=['h', 'r', 't'])
val = | pd.read_csv(path + 'valid.txt', header=None, sep='\t', names=['h', 'r', 't']) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Country data of B.1.1.7 occurrence.
Function: get_country_data().
@author: @hk_nien
"""
import re
from pathlib import Path
import pandas as pd
import datetime
import numpy as np
def _ywd2date(ywd):
"""Convert 'yyyy-Www-d' string to date (12:00 on that day)."""
twelvehours = pd.Timedelta('12 h')
dt = datetime.datetime.strptime(ywd, "%G-W%V-%w") + twelvehours
return dt
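# Illustrative example: _ywd2date('2020-W53-4') -> 2020-12-31 12:00 (Thursday of ISO week 53, 2020)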
def _add_odds_column(df):
df['or_b117'] = df['f_b117'] / (1 + df['f_b117'])
def _convert_ywd_records(records, colnames=('f_b117',)):
"""From records to DataFrame with columns.
Records are tuples with ('yyyy-Www-d', value, ...).
"""
df = pd.DataFrame.from_records(records, columns=('Date',) + tuple(colnames))
df['Date'] = [_ywd2date(r) for r in df['Date']]
df = df.set_index('Date')
if 'f_b117' in colnames and 'or_b117' not in colnames:
_add_odds_column(df)
return df
def set_date_in_records_keys(rrecords):
"""Replace {date} in key by date in 1st item for each, in-place.
e.g. 'NL ({date})' -> 'NL (2021-01-01)'.
"""
keys = list(rrecords.keys())
for k in keys:
new_k = k.format(date=rrecords[k][0]['date'])
record = rrecords[k]
del rrecords[k]
rrecords[new_k] = record
def _add_meta_record(reclist, desc, mdict, refs):
"""Add record to reclist; record is a dict with keys:
desc=desc,
**mdict,
refs->tuple of URLs
mdict['date'] will be converted to DateTime object.
"""
refs = tuple(refs)
rec = {'desc': desc,
**mdict,
'refs': refs,
}
if 'date' in rec:
rec['date'] = pd.to_datetime(rec['date'])
reclist.append(rec)
def _get_data_uk_genomic():
# https://twitter.com/hk_nien/status/1344937884898488322
# data points read from plot (as ln prevalence)
seedata = {
'2020-12-21': [
dict(date='2020-12-21', is_recent=False, is_seq=True, en_part='South East England'),
['https://t.co/njAXPsVlvb?amp=1'],
['2020-09-25', -4.2*1.25],
['2020-10-02', -3.5*1.25],
['2020-10-15', -3.2*1.25],
['2020-10-20', -2.3*1.25],
['2020-10-29', -2.3*1.25],
['2020-11-05', -1.5*1.25],
['2020-11-12', -0.9*1.25],
['2020-11-19', -0.15*1.25],
['2020-11-27', 0.8*1.25]
],
'2020-12-31': [
dict(date='2020-12-31', is_recent=True, is_seq=True, en_part='South East England'),
['https://www.imperial.ac.uk/media/imperial-college/medicine/mrc-gida/2020-12-31-COVID19-Report-42-Preprint-VOC.pdf'
],
['2020-10-31', -2.1],
['2020-11-08', -1.35],
['2020-11-15', -0.75],
['2020-11-22', -0.05],
['2020-11-29', 0.05],
]
}
cdict = {}
meta_records = []
for report_date, records in seedata.items():
df = pd.DataFrame.from_records(records[2:], columns=['Date', 'ln_odds'])
df['Date'] = pd.to_datetime(df['Date'])
odds = np.exp(df['ln_odds'])
df['f_b117'] = odds / (1 + odds)
df = df[['Date', 'f_b117']].set_index('Date')
desc = f'South East England (seq, {report_date})'
cdict[desc] = df
_add_meta_record(meta_records, desc, records[0], records[1])
return cdict, meta_records
def _get_data_countries_weeknos():
"""Countries with f_117 data by week number.
Return dataframe with metadata and dict of dataframes.
"""
# All country records are ('{year}-W{weekno}-{weekday}', fraction_b117)
# Item 0 in each list: metadata
# Item 1 in each list: source URLs
country_records = {
'DK (seq; {date})': [
dict(ccode='DK', date='2021-01-01', is_seq=True, is_recent=False),
['https://covid19.ssi.dk/-/media/cdn/files/opdaterede-data-paa-ny-engelsk-virusvariant-sarscov2-cluster-b117--01012021.pdf?la=da'],
('2020-W49-4', 0.002),
('2020-W50-4', 0.005),
('2020-W51-4', 0.009),
('2020-W52-4', 0.023)
],
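        # NOTE: the key below repeats the one above, so the newer DK record replaces
        # the 2021-01-01 entry when this dict literal is evaluated.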
'DK (seq; {date})': [
dict(ccode='DK', date='2021-02-14', is_seq=True, is_recent=True),
['https://www.covid19genomics.dk/statistics'],
('2020-W48-4', 0.002),
('2020-W49-4', 0.002),
('2020-W50-4', 0.004),
('2020-W51-4', 0.008),
('2020-W52-4', 0.020),
('2020-W53-4', 0.024),
('2021-W01-4', 0.040), # last updated 2021-02-05
('2021-W02-4', 0.075),
('2021-W03-4', 0.128),
('2021-W04-4', 0.191), # last updated 2021-02-05
('2021-W05-4', 0.271), # last updated before 2021-02-14
],
'NL (seq; {date}; OMT)': [
dict(ccode='NL', date='2021-01-01', is_seq=True, is_recent=False),
['https://www.tweedekamer.nl/kamerstukken/brieven_regering/detail?id=2021Z00794&did=2021D02016',
'https://www.rivm.nl/coronavirus-covid-19/omt'],
('2020-W49-4', 0.011),
('2020-W50-4', 0.007),
('2020-W51-4', 0.011),
('2020-W52-4', 0.014),
('2020-W53-4', 0.052),
('2021-W01-4', 0.119), # preliminary
],
'NL (seq; {date})': [
dict(ccode='NL', date='2021-02-07', is_seq=True, is_recent=True),
['https://www.tweedekamer.nl/kamerstukken/brieven_regering/detail?id=2021Z00794&did=2021D02016',
'https://www.tweedekamer.nl/sites/default/files/atoms/files/20210120_technische_briefing_commissie_vws_presentati_jaapvandissel_rivm_0.pdf',
'https://www.tweedekamer.nl/downloads/document?id=00588209-3f6b-4bfd-a031-2d283129331c&title=98e%20OMT%20advies%20deel%201%20en%20kabinetsreactie',
'https://www.tweedekamer.nl/downloads/document?id=be0cb7fc-e3fd-4a73-8964-56f154fc387e&title=Advies%20n.a.v.%2099e%20OMT%20deel%202.pdf'
],
('2020-W49-5', 0.011), # OMT #96 >>
('2020-W50-5', 0.007),
('2020-W51-5', 0.011),
('2020-W52-5', 0.020),
('2020-W53-5', 0.050), # << OMT #96
('2021-W01-5', 0.090), # TK briefing (read from figure ±0.005)
('2021-W02-5', 0.198), # OMT #98 (31 Jan)
('2021-W03-5', 0.241), # OMT #99
],
'UK (seq; {date})': [
dict(ccode='UK', date='2021-01-21', is_seq=True, is_recent=True),
['https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-risk-related-to-spread-of-new-SARS-CoV-2-variants-EU-EEA-first-update.pdf',
],
# Fig. 2. (traced, +/- 0.001 accuracy)
('2020-W43-4', 0.003),
('2020-W44-4', 0.008),
('2020-W45-4', 0.026),
('2020-W46-4', 0.063),
('2020-W47-4', 0.108),
('2020-W48-4', 0.101),
('2020-W49-4', 0.140),
('2020-W50-4', 0.333),
('2020-W51-4', 0.483),
('2020-W52-4', 0.539),
('2020-W53-4', 0.693),
# ('2021-W01-4', ...),
],
'PT (seq; {date})': [
dict(ccode='PT', date='2021-02-11', is_seq=True, is_recent=True),
['https://virological.org/t/tracking-sars-cov-2-voc-202012-01-lineage-b-1-1-7-dissemination-in-portugal-insights-from-nationwide-rt-pcr-spike-gene-drop-out-data/600',
'https://virological.org/t/tracking-sars-cov-2-voc-202012-01-lineage-b-1-1-7-dissemination-in-portugal-insights-from-nationwide-rt-pcr-spike-gene-drop-out-data/600/4',
'https://virological.org/t/tracking-sars-cov-2-voc-202012-01-lineage-b-1-1-7-dissemination-in-portugal-insights-from-nationwide-rt-pcr-spike-gene-drop-out-data/600/7',
],
('2020-W49-4', 0.019),
('2020-W50-4', 0.009),
('2020-W51-4', 0.013),
('2020-W52-4', 0.019),
('2020-W53-4', 0.032),
('2021-W01-4', 0.074),
('2021-W02-4', 0.133),
('2021-W03-4', 0.247),
('2021-W04-4', 0.365),
('2021-W05-4', 0.427),
],
'CH (seq; {date})': [
dict(ccode='CH', date='2021-02-14', is_seq=True, is_recent=True),
['https://sciencetaskforce.ch/nextstrain-phylogentische-analysen/'],
('2020-W51-4', 0.0004),
('2020-W52-4', 0.0043),
('2020-W53-4', 0.0074),
('2021-W01-4', 0.0153),
('2021-W02-4', 0.0329),
('2021-W03-4', 0.0881),
('2021-W04-4', 0.158), # last updated ca. 2021-02-05
('2021-W05-4', 0.235), # last updated before 2021-02-14
],
# https://assets.gov.ie/121054/55e77ccd-7d71-4553-90c9-5cd6cdee7420.pdf (p. 53) up to wk 1
# https://assets.gov.ie/121662/184e8d00-9080-44aa-af74-dbb13b0dcd34.pdf (p. 2, bullet 8) wk 2/3
'IE (SGTF; {date})': [
dict(ccode='IE', date='2021-02-04', is_seq=False, is_sgtf=True, is_recent=True),
['https://assets.gov.ie/121054/55e77ccd-7d71-4553-90c9-5cd6cdee7420.pdf', # (p. 53) up to wk 1
'https://assets.gov.ie/121662/184e8d00-9080-44aa-af74-dbb13b0dcd34.pdf', # (p. 2, bullet 8) wk 2/3
'https://assets.gov.ie/122798/644f5185-5067-4bd4-89fa-8cb75670821d.pdf', # p. 2, bullet 5
],
('2020-W50-4', 0.014),
('2020-W51-4', 0.086),
('2020-W52-4', 0.163),
('2020-W53-4', 0.262),
('2021-W01-4', 0.463), # 21 Jan
('2021-W02-4', 0.58),
('2021-W03-4', 0.63), # 28 Jan
('2021-W04-4', 0.695), # 4 Feb
('2021-W05-4', 0.75), # 4 Feb
]
}
set_date_in_records_keys(country_records)
cdict = {}
meta_records = []
for desc, records in country_records.items():
cdict[desc] = _convert_ywd_records(records[2:], ['f_b117'])
_add_meta_record(meta_records, desc, records[0], records[1])
return cdict, meta_records
#%%
regions_pop = {
'South East England': 9180135,
'London': 8961989,
'North West England': 7341196,
'East England': 6236072,
'West Midlands': 5934037,
'South West England': 5624696,
'Yorkshire': 5502967,
'East Midlands': 4835928,
'North East England': 2669941,
}
regions_pop['England (multiple regions; 2021-01-15)'] = sum(regions_pop.values())
uk_countries_pop = {
'England': 56286961,
'Scotland': 5463300,
'Wales': 3152879,
'Northern Ireland': 1893667,
}
def _get_data_England_regions(subtract_bg=True):
"""Get datasets for England regions. Original data represents 'positive population'.
Dividing by 28 days and time-shifting 14 days to get estimated daily increments.
With subtract_bg: Subtracting lowest region value - assuming background
false-positive for S-gene target failure.
Data source: Walker et al., https://doi.org/10.1101/2021.01.13.21249721
Published 2021-01-15.
"""
index_combi = pd.date_range('2020-09-28', '2020-12-14', freq='7 d')
df_combi = pd.DataFrame(index=index_combi)
ncolumns = ['pct_sgtf', 'pct_othr']
for col in ncolumns:
df_combi[col] = 0
pub_date = '2021-01-15'
cdict = {f'England (SGTF; multiple regions; {pub_date})': df_combi}
for fpath in sorted(Path('data').glob('uk_*_b117_pop.csv')):
ma = re.search('uk_(.*)_b117', str(fpath))
region = ma.group(1).replace('_', ' ')
df = pd.read_csv(fpath, comment='#').rename(columns={'Unnamed: 0': 'Date'})
df['Date'] = pd.to_datetime(df['Date']) - pd.Timedelta(14, 'd')
df = df.set_index('Date')
# interpolate and add to the combined dataframe.
df2 = pd.DataFrame(index=index_combi) # resampling data here
df2 = df2.merge(df[ncolumns], how='outer', left_index=True, right_index=True)
df2 = df2.interpolate(method='quadratic').loc[index_combi]
for col in ncolumns:
df_combi[col] += df2[col]
        cdict[f'{region} (SGTF; {pub_date})'] = df
# convert to estimated new cases per day.
for key, df in cdict.items():
region = re.match(r'(.*) \(.*', key).group(1)
if region == 'England':
region = f'England (multiple regions; {pub_date})'
# estimate false-positive for SGTF as representing B.1.1.7
if subtract_bg:
pct_bg = df['pct_sgtf'].min()
else:
pct_bg = 0.0
df['n_b117'] = ((df['pct_sgtf'] - pct_bg)*(0.01/28 * regions_pop[region])).astype(int)
df['n_oth'] = ((df['pct_othr'] + pct_bg)*(0.01/28 * regions_pop[region])).astype(int)
# this doesn't work
# if subtract_bg:
# pct_tot = df['pct_sgtf'] + df['pct_othr']
# # f: fraction of positive test. Correct for background.
# f_sgtf = df['pct_sgtf']/pct_tot
# f_sgtf_min = f_sgtf.min()
# f_sgtf -= f_sgtf_min
# # convert back to pct values
# df['pct_sgtf'] = pct_tot * f_sgtf
# df['pct_othr'] = pct_tot * (1-f_sgtf)
# df['n_b117'] = (df['pct_sgtf'] * (0.01/28 * regions_pop[region])).astype(int)
# df['n_oth'] = (df['pct_othr'] * (0.01/28 * regions_pop[region])).astype(int)
df.drop(index=df.index[df['n_b117'] <= 0], inplace=True)
df['n_pos'] = df['n_b117'] + df['n_oth']
df['or_b117'] = df['n_b117'] / df['n_oth']
df['f_b117'] = df['or_b117']/(1 + df['or_b117'])
for col in ncolumns + ['n_pos']:
df_combi[col] = np.around(df_combi[col], 0).astype(int)
meta_records = []
for desc in cdict.keys():
region = re.match('(.*) \(', desc).group(1)
record = dict(
desc=desc,
date=pd.to_datetime(pub_date),
en_part=region,
is_recent=True,
is_seq=False,
is_sgtf=True,
refs=('https://doi.org/10.1101/2021.01.13.21249721',)
)
meta_records.append(record)
return cdict, meta_records
def load_uk_ons_gov_country_by_var():
"""Get data based on data/ons_gov_uk_country_by_var.xlsx.
Return:
- dataframe
- date_pub
- tuple of source URLs
Dataframe layout:
- index: Date.
- columns: {country_name}:{suffix}
with suffix = 'pnew', 'pnew_lo', 'pnew_hi', 'poth', ..., 'pnid', ...
for percentages new UK variant, CI low, CI high,
other variant, not-identified.
"""
refs = [
'https://www.ons.gov.uk/peoplepopulationandcommunity/healthandsocialcare/conditionsanddiseases/bulletins/coronaviruscovid19infectionsurveypilot/29january2021#positive-tests-that-are-compatible-with-the-new-uk-variant',
'https://www.ons.gov.uk/visualisations/dvc1163/countrybyvar/datadownload.xlsx',
]
# Excel sheet: groups of 9 columns by country (England, Wales, NI, Scotland).
xls_fname = 'data/ons_gov_uk_country_by_var.xlsx'
# 1st round: sanity check and construct better column names.
df = pd.read_excel(xls_fname, skiprows=3)
assert np.all(df.columns[[1, 10]] == ['England', 'Wales'])
assert df.iloc[0][0] == 'Date'
assert df.iloc[0][1] == '% testing positive new variant compatible*'
# percentages new variant, other, unidentified, with 95% CIs.
col_suffixes = ['pnew', 'pnew_hi', 'pnew_lo', 'poth', 'poth_hi', 'poth_lo',
'pnid', 'pnid_hi', 'pnid_lo']
colmap = {df.columns[0]: 'Date'}
for i in range(1, 37, 9):
country_name = df.columns[i]
for j in range(9):
colmap[df.columns[i+j]] = f'{country_name}:{col_suffixes[j]}'
df.rename(columns=colmap, inplace=True)
df.drop(columns=df.columns[37:], inplace=True)
# find the end of the data
i_stop = 2 + np.argmax(df['Date'].iloc[2:].isna())
assert i_stop >= 44
df = df.iloc[2:i_stop]
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace=True)
if df.index[-1] == pd.to_datetime('2021-01-23'):
date_pub = '2021-01-29'
else:
raise ValueError('Please check publication date')
return df, date_pub, refs
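# Illustrative example of the resulting layout: columns such as 'England:pnew',
# 'England:pnew_hi', 'England:pnew_lo', 'England:poth', ..., 'Wales:pnew', ...,
# indexed by Date.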
def _get_data_uk_countries_ons():
"""Data for UK countries based on PCR (SGTF, N-gege, ...)
Shifted 14 days to estimated date of onset. There doesn't seem to be
a background level (frequencies are too high for false detection to matter).
Sampled 1x per week.
# https://www.ons.gov.uk/peoplepopulationandcommunity/healthandsocialcare/conditionsanddiseases/bulletins/coronaviruscovid19infectionsurveypilot/29january2021#positive-tests-that-are-compatible-with-the-new-uk-variant
# https://www.ons.gov.uk/visualisations/dvc1163/countrybyvar/datadownload.xlsx
"""
df, pub_date, refs = load_uk_ons_gov_country_by_var()
c_names = ['England', 'Wales', 'Northern Ireland', 'Scotland']
c_names = {cn: cn for cn in c_names}
c_names['Northern Ireland'] = 'N. Ireland'
shifted_dates = df.index - pd.Timedelta(14, 'd')
cdict = {}
meta_records = []
# combined data for entire UK
df_uk = pd.DataFrame(index=shifted_dates, data=dict(nnew=0., noth=0.))
for cn in c_names:
pnew = np.array(df[f'{cn}:pnew'], dtype=float)
poth = np.array(df[f'{cn}:poth'], dtype=float)
cdf = pd.DataFrame(
dict(f_b117=pnew/(pnew + poth), or_b117=pnew/poth),
index=shifted_dates
)
population = uk_countries_pop[cn]
df_uk['nnew'] += population / 100 * pnew
df_uk['noth'] += population / 100 * poth
# resample, make sure to include point n-4.
# (don't trust the last few points)
n = len(cdf)
cdf = cdf.iloc[(n-3)%7::7]
desc = f'{c_names[cn]} (SGTF; {pub_date})'
cdict[desc] = cdf
meta_records.append(dict(
desc=desc,
date=pd.to_datetime(pub_date),
uk_part=cn,
is_seq=False,
is_sgtf=True,
refs=refs,
))
df_uk = df_uk.iloc[(len(df_uk)-3)%7::7]
df_uk['f_b117'] = df_uk['nnew']/(df_uk['noth'] + df_uk['nnew'])
df_uk['or_b117'] = df_uk['nnew']/df_uk['noth']
cdict[f'UK (SGTF; {pub_date})'] = df_uk
return cdict, meta_records
def _get_data_ch_parts():
"""Note: this is daily data, not weekly data"""
region_records = {
'Genève (PCR, {date})': [
dict(ch_part='Genève', date='2021-02-14', is_recent=True, is_pcr=True),
['https://ispmbern.github.io/covid-19/variants/'],
('2021-01-13', 0.1817),
('2021-01-14', 0.09823),
('2021-01-15', 0.1932),
('2021-01-16', 0.2441),
('2021-01-17', 0.2124),
('2021-01-18', 0.2499),
('2021-01-19', 0.2167),
('2021-01-20', 0.1903),
('2021-01-21', 0.1661),
('2021-01-22', 0.2907),
('2021-01-23', 0.2557),
('2021-01-24', 0.3348),
('2021-01-25', 0.2665),
('2021-01-26', 0.4243),
('2021-01-27', 0.4792),
('2021-01-28', 0.4893),
('2021-01-29', 0.5135),
('2021-01-30', 0.558),
('2021-01-31', 0.5749),
('2021-02-01', 0.5002),
('2021-02-02', 0.6163),
('2021-02-03', 0.8583),
('2021-02-04', 0.5307),
('2021-02-05', 0.5474),
('2021-02-06', 0.7215),
('2021-02-07', 0.6295),
('2021-02-08', 0.6842),
('2021-02-09', 0.7279),
('2021-02-10', 0.7943),
],
'Zürich (PCR; {date})': [
dict(ch_part='Zürich', date='2021-02-14', is_recent=True, rebin=3),
['https://ispmbern.github.io/covid-19/variants/'],
('2021-01-06', 0.0007223),
('2021-01-07', 0.03684),
('2021-01-08', 0.01697),
('2021-01-09', -0.0003611),
('2021-01-10', 0.04912),
('2021-01-11', 0.02564),
('2021-01-12', -0.0003611),
('2021-01-13', 0.02961),
('2021-01-14', 0.1116),
('2021-01-15', 0.1434),
('2021-01-16', 0.0003611),
('2021-01-17', 0.08451),
('2021-01-18', -0.0007223),
('2021-01-19', 0.1492),
('2021-01-20', 0.06284),
('2021-01-21', 0.277),
('2021-01-22', 0.05273),
('2021-01-23', 0.2232),
('2021-01-24', 0.1672),
('2021-01-25', 0.2004),
('2021-01-26', 0.1192),
('2021-01-27', 0.2867),
('2021-01-28', 0.1571),
('2021-01-29', 0.08234),
('2021-01-30', 0.2867),
('2021-01-31', 0.2777),
('2021-02-01', 0.2929),
('2021-02-02', 0.1495),
('2021-02-03', -0.0003611),
('2021-02-01', 0.2304),
('2021-02-02', 0.2872),
('2021-02-03', 0.2914),
('2021-02-04', 0.2872),
('2021-02-05', 0.388),
('2021-02-06', 0.3805),
('2021-02-07', 0.4331),
('2021-02-08', 0.453),
('2021-02-09', 0.2219),
('2021-02-10', 0.4466),
]
}
set_date_in_records_keys(region_records)
cdict = {}
meta_records = []
for desc, records in region_records.items():
df = | pd.DataFrame.from_records(records[2:], columns=['sample_date', 'f_b117']) | pandas.DataFrame.from_records |
"""
Wrapper Module to generate molecular descriptors by using other packages
"""
import math
import numpy as np
import os
import pandas as pd
from abc import ABC, abstractmethod
from rdkit import Chem
from rdkit.Chem import AllChem, MACCSkeys
import rdkit.Chem.rdmolops as rdmolops
import rdkit.Chem.rdMolDescriptors as rdDesc
import rdkit.Chem.EState.EState_VSA as EState
from mordred import Calculator as mdCalc
import mordred.descriptors as mdDesc
import mordred.error as mdError
class Descriptors(ABC):
"""
An abstract class for descriptor computation.
Attributes:
Molecule: a rdkit.Chem.rdchem.Mol object, stores the chemical info.
"""
def __init__(self, SMILES = None):
""" Descriptor Constructor """
if(SMILES is not None):
self.set_molecule(SMILES)
else:
self.Molecule = None
def set_molecule(self, SMILES):
""" set molecule of the rdkitDecriptor"""
self.Molecule = Chem.MolFromSmiles(SMILES)
return
def compute_all_descriptors(self):
""" compute descriptors for one molecule"""
pass
@abstractmethod
def batch_compute_all_descriptors(SMILES_list):
""" compute descriptors for a list of molecules, must
implemented as @staticmethod.
"""
pass
class rdkitDescriptors(Descriptors):
"""
    A wrapper class using rdkit to generate the different descriptors.
    Initialized with the SMILES of a molecule.
Attributes:
Molecule: an object of rdkit.Chem.rdchem.Mol
Methods:
set_molecule
compute_all_descriptors
compute_properties
compute_connectivity_and_shape_indexes
compute_MOE_descriptors
compute_MQN_descriptors
compute_Morgan_fingerprint
"""
def compute_all_descriptors(self,desc_type='all'):
"""compute all descriptors avaiable from the rdkit package,
Args:
desc_type: descriptor type, could be 'all', 'int', or 'float'
Return:
desc_dict: descriptor dictionary.
"""
assert desc_type in ['all', 'int','float']
desc_dict = {}
if desc_type == 'all':
desc_dict.update(self.compute_properties())
desc_dict.update(self.compute_connectivity_and_shape_indexes())
desc_dict.update(self.compute_MOE_descriptors())
desc_dict.update(self.compute_MQN_descriptors())
elif desc_type == 'int':
desc_dict.update(self.compute_properties(\
['lipinskiHBA', 'lipinskiHBD', 'NumRotatableBonds', 'NumRings',
'NumHeteroatoms', 'NumAmideBonds','NumAromaticRings', 'NumHBA',
'NumAliphaticRings', 'NumSaturatedRings', 'NumHeterocycles',
'NumAromaticHeterocycles', 'NumSaturatedHeterocycles', 'NumHBD',
'NumAliphaticHeterocycles', 'NumSpiroAtoms', 'NumBridgeheadAtoms',
'NumAtomStereoCenters','NumUnspecifiedAtomStereoCenters']))
desc_dict.update(self.compute_MQN_descriptors())
elif desc_type == 'float':
desc_dict.update(self.compute_properties(\
['exactmw','FractionCSP3','labuteASA','tpsa',
'CrippenClogP','CrippenMR']))
desc_dict.update(self.compute_connectivity_and_shape_indexes())
desc_dict.update(self.compute_MOE_descriptors())
return desc_dict
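    # Illustrative usage (added sketch, not from the original source): compute
    # only the integer-valued descriptors for ethanol.
    #   desc = rdkitDescriptors('CCO')
    #   int_features = desc.compute_all_descriptors(desc_type='int')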
@staticmethod
def batch_compute_all_descriptors(SMILES_list, desc_type='all'):
"""compute all descriptors avaiable from the rdkit package for a list
of molecules.
Args:
desc_type: descriptor type, could be 'all', 'int', or 'float'
Return:
desc_df: descriptors pandas.DataFrame.
"""
assert len(SMILES_list) >= 1
Molecules = list(map(Chem.MolFromSmiles, SMILES_list))
DESC_ENGINE = rdkitDescriptors()
DESC_ENGINE.set_molecule(SMILES_list[0])
desc_dict = DESC_ENGINE.compute_all_descriptors(desc_type)
        desc_df = pd.DataFrame(desc_dict, index=[0])
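        # --- hedged continuation sketch: the original file is truncated here ---
        # Assumed logic: repeat the per-molecule computation for the remaining
        # SMILES and stack the rows into one DataFrame. (The pre-built
        # `Molecules` list above is presumably used in the original; this
        # sketch simply reuses set_molecule for clarity.)
        for smiles in SMILES_list[1:]:
            DESC_ENGINE.set_molecule(smiles)
            row = pd.DataFrame(DESC_ENGINE.compute_all_descriptors(desc_type),
                               index=[0])
            desc_df = pd.concat([desc_df, row], ignore_index=True)
        return desc_df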
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from unittest import mock
import datetime as dt
from gs_quant.api.gs.backtests import GsBacktestApi
from gs_quant.backtests.strategy import Strategy
from gs_quant.backtests.triggers import PeriodicTrigger, PeriodicTriggerRequirements, DateTrigger, \
DateTriggerRequirements, AggregateTrigger, PortfolioTrigger, PortfolioTriggerRequirements, \
TriggerDirection
from gs_quant.backtests.actions import EnterPositionQuantityScaledAction, HedgeAction, ExitPositionAction
from gs_quant.backtests.equity_vol_engine import *
from gs_quant.common import Currency, AssetClass
from gs_quant.session import GsSession, Environment
from gs_quant.target.backtests import BacktestResult as APIBacktestResult, Backtest, OptionStyle, OptionType, \
BacktestTradingParameters, BacktestStrategyUnderlier, BacktestStrategyUnderlierHedge, \
VolatilityFlowBacktestParameters, BacktestTradingQuantityType, BacktestSignalSeriesItem
import pandas as pd
def set_session():
from gs_quant.session import OAuth2Session
OAuth2Session.init = mock.MagicMock(return_value=None)
GsSession.use(Environment.QA, 'client_id', 'secret')
def api_mock_data() -> APIBacktestResult:
PNL_data = [
{'date': '2019-02-18', 'value': 0},
{'date': '2019-02-19', 'value': -0.000000000058},
{'date': '2019-02-20', 'value': 0.000000000262}
]
risk_data = {'PNL': PNL_data}
return APIBacktestResult('BT1', risks=risk_data)
def mock_api_response(mocker, mock_result: APIBacktestResult):
mocker.return_value = mock_result
@mock.patch.object(GsBacktestApi, 'run_backtest')
def test_eq_vol_engine_result(mocker):
# 1. setup strategy
start_date = dt.date(2019, 2, 18)
end_date = dt.date(2019, 2, 20)
option = EqOption('.STOXX50E', expirationDate='3m', strikePrice='ATM', optionType=OptionType.Call,
optionStyle=OptionStyle.European)
action = EnterPositionQuantityScaledAction(priceables=option, trade_duration='1m')
trigger = PeriodicTrigger(
trigger_requirements=PeriodicTriggerRequirements(start_date=start_date, end_date=end_date, frequency='1m'),
actions=action)
hedgetrigger = PeriodicTrigger(
trigger_requirements=PeriodicTriggerRequirements(start_date=start_date, end_date=end_date, frequency='B'),
actions=HedgeAction(EqDelta, priceables=option, trade_duration='B'))
strategy = Strategy(initial_portfolio=None, triggers=[trigger, hedgetrigger])
# 2. setup mock api response
mock_api_response(mocker, api_mock_data())
# 3. when run backtest
set_session()
backtest_result = EquityVolEngine.run_backtest(strategy, start_date, end_date)
# 4. assert response
df = pd.DataFrame(api_mock_data().risks[FlowVolBacktestMeasure.PNL.value])
    df.date = pd.to_datetime(df.date)
# Copyright (c) 2016-2019, Broad Institute, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Broad Institute, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import itertools
from collections import defaultdict
import numpy
import pandas
logger = logging.getLogger(__name__)
def similarities_to_matrix(similarities, labels, metric='jaccard'):
"""Turn the pairwise similarities into a symmetric matrix."""
label_ix = {label: i for i, label in enumerate(labels)}
matrix = numpy.empty((len(labels), len(labels)))
for kmerset1, kmerset2 in similarities.index:
i = label_ix[kmerset1]
j = label_ix[kmerset2]
if metric == 'subset':
matrix[i, j] = similarities.loc[(kmerset1, kmerset2), 'subset1']
matrix[j, i] = similarities.loc[(kmerset1, kmerset2), 'subset2']
else:
matrix[i, j] = similarities.loc[(kmerset1, kmerset2), metric]
matrix[j, i] = similarities.loc[(kmerset1, kmerset2), metric]
for i in range(len(label_ix)):
matrix[i, i] = numpy.nan
    return pandas.DataFrame(matrix, index=labels, columns=labels)
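# Illustrative usage (added sketch, not part of the original module): a tiny
# pairwise-similarity table indexed by (kmerset1, kmerset2) tuples.
def _example_similarities_to_matrix():
    labels = ['setA', 'setB', 'setC']
    pairs = [('setA', 'setB'), ('setA', 'setC'), ('setB', 'setC')]
    sims = pandas.DataFrame({'jaccard': [0.5, 0.2, 0.1]},
                            index=pandas.MultiIndex.from_tuples(pairs))
    # Returns a symmetric 3x3 DataFrame with NaN on the diagonal.
    return similarities_to_matrix(sims, labels, metric='jaccard')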
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import pandas as pd
import numpy as np
import json
import zipfile
import matplotlib.pyplot as plt
import seaborn as sns
import re
import nltk
import string
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from textblob import TextBlob
from textblob import Word
import random
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
# In[25]:
# omdb has the imdb_id and the plot is also there in it
# rotten has the imdb_id and rotten_tomato_id
# genres has the wikidata_id
# wikidata has the imdb_id, wiki_id and the rotten_tomato_id
wikidata = pd.read_json('wikidata-movies.json.gz', orient='record', lines=True)
genres = pd.read_json('genres.json.gz', orient='record', lines=True)
rotten = pd.read_json('rotten-tomatoes.json.gz', orient='record', lines=True)
"""
Provides helper routines for preprocessing.
"""
# License: MIT
from __future__ import absolute_import, division
import numpy as np
import pandas as pd
import scipy.optimize as so
import scipy.stats as ss
from .validation import (is_integer, is_pandas_dataframe,
is_pandas_series)
def get_offset_variable_name(variable, offset):
"""Get placeholder name for lagged variable."""
if offset < 0:
return '{}_lag_{:d}'.format(variable, abs(offset))
if offset == 0:
return variable
return '{}_lead_{:d}'.format(variable, offset)
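# Illustrative examples (added): negative offsets are lags, positive are leads.
#   get_offset_variable_name('price', -2) -> 'price_lag_2'
#   get_offset_variable_name('price', 0)  -> 'price'
#   get_offset_variable_name('price', 3)  -> 'price_lead_3'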
def _check_presample_length(presample_length, offsets):
"""Check presample length is consistent with the given offsets."""
max_lag = min(offsets)
if presample_length is None:
presample_length = abs(max_lag)
else:
        if not is_integer(presample_length) or presample_length < abs(max_lag):
raise ValueError(
'Presample length must be an integer greater '
'than or equal to the maximum lag '
'(got presample_length=%r but min(offsets)=%d)' %
(presample_length, max_lag))
return presample_length
def _construct_lagged_dataframe(variables_and_offsets, data,
presample_length=None,
**kwargs):
"""Construct pandas DataFrame containing lagged variables.
Parameters
----------
variables_and_offsets : list
List of tuples of the form (variable name, offset).
data : pandas DataFrame
Dataframe containing the values of each variable.
presample_length : int
Minimum number of rows to treat as pre-sample values.
Returns
-------
lagged : pandas DataFrame
Dataframe with columns containing offset values
for each variable and offset pair.
"""
if not is_pandas_dataframe(data):
raise ValueError(
'Input data must be a pandas DataFrame '
'(got type(data)=%r)' % type(data))
offsets = [v[1] for v in variables_and_offsets]
presample_length = _check_presample_length(presample_length, offsets)
lagged_series = {
get_offset_variable_name(*v): data[v[0]].shift(
periods=-v[1], **kwargs)
for v in variables_and_offsets}
return pd.DataFrame(lagged_series).iloc[presample_length:]
def construct_lagged_data(variables_and_lags, data, presample_length=None,
**kwargs):
"""Construct dataset containing lagged variables.
Parameters
----------
variables_and_lags : list
List of tuples of the form (variable name, lag).
data : dict-like
Object containing the values of each variable.
presample_length : int
Minimum number of rows to treat as pre-sample values.
Returns
-------
lagged : dict-like
Object with keys corresponding to lagged values
for each variable and lag pair.
"""
if is_pandas_dataframe(data):
return _construct_lagged_dataframe(
variables_and_lags, data, presample_length=presample_length,
**kwargs)
raise NotImplementedError(
'Construction of lagged data not supported for given data type '
'(got type(data)=%r)' % type(data))
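# --- Illustrative usage (added sketch, not part of the original module) -----
def _example_construct_lagged_data():
    """Demonstrate construct_lagged_data on a toy frame (illustrative only)."""
    demo = pd.DataFrame({'x': range(6), 'y': range(10, 16)})
    # One- and two-step lags of x plus the contemporaneous value of y.
    lagged = construct_lagged_data([('x', -1), ('x', -2), ('y', 0)], demo)
    # Columns are named via get_offset_variable_name: x_lag_1, x_lag_2, y.
    # The first two rows (the pre-sample, equal to the largest lag) are dropped.
    return lagged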
def _check_standardization_interval(standardize_by):
"""Check standardization interval is valid."""
valid_intervals = ['dayofyear', 'month']
is_valid = standardize_by is None or standardize_by in valid_intervals
if not is_valid:
raise ValueError(
"Unrecognized standardization interval '%r'" % standardize_by)
return standardize_by
def _standardize_time_series_dataframe(data, base_period=None,
standardize_by=None,
resample=False):
"""Standardize time series of index values."""
if base_period is None:
base_period = [data.index.min(), data.index.max()]
standardize_by = _check_standardization_interval(standardize_by)
if standardize_by is None:
base_period_data = data[(data.index >= base_period[0]) &
(data.index <= base_period[1])]
return (data - base_period_data.mean()) / base_period_data.std(ddof=1)
if standardize_by == 'dayofyear':
must_resample = pd.infer_freq(data.index) not in ('D', '1D')
if resample and must_resample:
data = data.resample('1D').mean()
groups = data.index.dayofyear
else:
        must_resample = (pd.infer_freq(data.index)
import os
import glob
import numpy as np
import pylab as pl
import scipy.io as sio
# for_Jyotika.m
from copy import copy, deepcopy
import pickle
import matplotlib.cm as cm
import pdb
import h5py
import pandas as pd
import bct
from collections import Counter
import matplotlib.cm as cm
import sys
import seaborn as sns
import scipy.stats as sp_stats
sys.path.append("./common/")
import analyze as anal
data_dir = "./data/"
data_target_dir = "./data/"
fig_target_dir = "./Figure2/"
Fig2_panel_name = dict({"modularity_index":"H","participation_pos":"I","module_degree_zscore":"J","local_assortativity_pos_whole":"K"})
subtype = sys.argv[1]
ipsi_contra = sys.argv[2]
if subtype == "subtype":
if ipsi_contra == "n":
graph_prop_df = pd.read_csv(data_dir+"graph_properties_pandas_all.csv")
        graph_prop_df_null = pd.read_csv(data_dir+"graph_properties_pandas_null_all.csv")
#!/home/wli/env python3
# -*- coding: utf-8 -*-
"""
Title: wsi visualization
=================================
Created: 10-31-2019
Python-Version: 3.5
Description:
------------
This module is used to view the WSI, its mask and heatmap overlay.
Note:
-----
The level of display resolution depends on the memory of the workstation used.
The default level is 5 (32x downsampled from the level 0 WSI image).
"""
import os.path as osp
import openslide
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pandas import DataFrame
import xml.etree.ElementTree as et
import pandas as pd
from skimage.filters import threshold_otsu
import math
import glob
import re
import os
class WSI_viewer(object):
'''
The wsi_viewer class is used to display the wsi image, ground truth,
predicted heatmap and their overlays.
:param WSI_path: the path of a WSI image
:type WSI_path: str
:param Xml_path: the path of a xml file, the ground truth (outline the
tumor regions) of a WSI image
:type Xml_path: str
:param Dimension_path: the path of npy file I generated to store the
dimensions of WSI image and tissue region.
:type Dimension_path: str
:param Mask_truth_path: the path of generated binary mask files from xml
files.
:type Mask_truth_path: str
:param Heatmap_path: the path of predicted heatmap showing the scores of
possible tumor region.
    :type Heatmap_path: str
ivar: contours of tissue region, tumor region (from ground truth), and
heatmap on WSI image.
vartype: png
How to Use:
-----------
- create an instance of the object
        for example, viewer = WSI_viewer(WSI_path, Xml_path, Dimension_path)
- display the combined contours of tissue region and tumor
region viewer.combined_contour_on_wsi()
- display heatmap over contours of tumor region
viewer.display_heatmap()
- generate binary mask flies:
viewer.mask_generation()
'''
# Class Attribute
slide_level = 4
PPI = 150
# Initializer / Instance Attributes
def __init__(self, WSI_path, Xml_path, Dimension_path, Mask_truth_path='',
Heatmap_path=''):
self.WSI_path = WSI_path
self.Xml_path = Xml_path
self.Mask_truth_path = Mask_truth_path
self.Heatmap_path = Heatmap_path
self.Dimension_path = Dimension_path
# load in the files
self.wsi_image = openslide.open_slide(self.WSI_path)
# ground_truth = openslide.open_slide(ground_truth_dir)
if self.Mask_truth_path:
self.mask_truth = cv2.imread(self.Mask_truth_path)
if self.Heatmap_path:
self.heat_map = np.load(self.Heatmap_path)
self.bbox = np.load(self.Dimension_path)
# read in the wsi image at level 4, downsampled by 16
self.dims = self.wsi_image.dimensions
# dims = wsi_image.level_dimensions[4]
self.wsi_image_thumbnail = np.array(self.wsi_image.read_region((0, 0),
self.slide_level, (int(self.dims[0]/math.pow(2, self.slide_level)), int(self.dims[1]/math.pow(2, self.slide_level)))))
self.wsi_image_thumbnail = self.wsi_image_thumbnail[:, :, :3].astype(
'uint8')
        # read in the ground truth mask (only if a mask file was provided)
        if self.Mask_truth_path:
            self.mask_truth = self.mask_truth[:, :, 0].astype('uint8')
# ground_truth_image = np.array(ground_truth.get_thumbnail((dims[0]/16,
# dims[1]/16)))
# Setter and Getter
@property
def WSI_path(self):
return self.__WSI_path
@WSI_path.setter
def WSI_path(self, wsipath):
        if os.path.isfile(wsipath):  # the WSI path should point to an image file
self.__WSI_path = wsipath
def tissue_contour_on_wsi(self, output_path):
# read the WSI file, do not use get_thumbnail function. It has bug
# wsi_image = openslide.open_slide(WSI_path)
# dims = wsi_image.dimensions
# thumbnail = wsi_image.read_region((0,0), slide_level,(int(dims[0]/32),
# int(dims[1]/32)))
# thumbnail = np.array(thumbnail)
# thumbnail = thumbnail[:,:,:3]
# thumbnail = thumbnail.astype('uint8')
# drawcontours for tissue regions only
hsv_image = cv2.cvtColor(self.wsi_image_thumbnail, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(hsv_image)
hthresh = threshold_otsu(h)
sthresh = threshold_otsu(s)
# vthresh = threshold_otsu(v)
# be min value for v can be changed later
minhsv = np.array([hthresh, sthresh, 0], np.uint8)
maxhsv = np.array([180, 255, 255], np.uint8)
thresh = [minhsv, maxhsv]
# extraction the countor for tissue
rgbbinary = cv2.inRange(hsv_image, thresh[0], thresh[1])
# plt.imshow(rgbbinary)
rgbbinary = rgbbinary.astype("uint8")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
rgbbinary_close = cv2.morphologyEx(rgbbinary, cv2.MORPH_CLOSE, kernel)
rgbbinary_open = cv2.morphologyEx(
rgbbinary_close, cv2.MORPH_OPEN, kernel)
_, contours, _ = cv2.findContours(
rgbbinary_open, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_on_wsi = cv2.drawContours(
self.wsi_image_thumbnail, contours, -1, (0, 255, 0), 20)
cv2.imwrite(output_path + "%s.png" %
osp.splitext(osp.basename(self.WSI_path))[0], contours_on_wsi)
return contours
# reader = mir.MultiResolutionImageReader()
# mr_image = reader.open('/home/wli/Downloads/tumor_036.tif')
# Ximageorg, Yimageorg = mr_image.getDimensions()
# dims = mr_image.getLevelDimensions(4)
# Ximage = (Ximage+240//2)//240
# Ximage = 4000
# Yimage = (Yimage+240//2)//240
# Yimage = 2000
# this is a private method used for mask generation
def _convert_xml_df(self):
parseXML = et.parse(self.Xml_path)
root = parseXML.getroot()
dfcols = ['Name', 'Order', 'X', 'Y']
df_xml = pd.DataFrame(columns=dfcols)
for child in root.iter('Annotation'):
for coordinate in child.iter('Coordinate'):
Name = child.attrib.get('Name')
Order = coordinate.attrib.get('Order')
X_coord = float(coordinate.attrib.get('X'))
# X_coord = X_coord - 30000
# X_coord = ((X_coord)*dims[0])/Ximageorg
X_coord = X_coord/32
Y_coord = float(coordinate.attrib.get('Y'))
# Y_coord = Y_coord - 155000
# Y_coord = ((Y_coord)*dims[1])/Yimageorg
Y_coord = Y_coord/32
df_xml = df_xml.append(pd.Series(
[Name, Order, X_coord, Y_coord], index=dfcols), ignore_index=True) # type: DataFrame
        df_xml = pd.DataFrame(df_xml)
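        # (hedged) the original file is truncated here; the helper presumably
        # returns the coordinate table used for mask generation.
        return df_xml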
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import pandas as pd
from numpy import nan, isnan, arange, dtype, zeros
from pandapower.auxiliary import pandapowerNet, get_free_id, _preserve_dtypes
from pandapower.results import reset_results
from pandapower.std_types import add_basic_std_types, load_std_type
from pandapower import __version__
def create_empty_network(name="", f_hz=50., sn_kva=1e3):
"""
This function initializes the pandapower datastructure.
OPTIONAL:
**f_hz** (float, 50.) - power system frequency in hertz
**name** (string, None) - name for the network
**sn_kva** (float, 1e3) - reference apparent power for per unit system
OUTPUT:
**net** (attrdict) - PANDAPOWER attrdict with empty tables:
EXAMPLE:
net = create_empty_network()
"""
net = pandapowerNet({
# structure data
"bus": [('name', dtype(object)),
('vn_kv', 'f8'),
('type', dtype(object)),
('zone', dtype(object)),
('in_service', 'bool'), ],
"load": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("q_kvar", "f8"),
("const_z_percent", "f8"),
("const_i_percent", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"sgen": [("name", dtype(object)),
("bus", "i8"),
("p_kw", "f8"),
("q_kvar", "f8"),
("sn_kva", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"gen": [("name", dtype(object)),
("bus", "u4"),
("p_kw", "f8"),
("vm_pu", "f8"),
("sn_kva", "f8"),
("min_q_kvar", "f8"),
("max_q_kvar", "f8"),
("scaling", "f8"),
("in_service", 'bool'),
("type", dtype(object))],
"switch": [("bus", "i8"),
("element", "i8"),
("et", dtype(object)),
("type", dtype(object)),
("closed", "bool"),
("name", dtype(object))],
"shunt": [("bus", "u4"),
("name", dtype(object)),
("q_kvar", "f8"),
("p_kw", "f8"),
("vn_kv", "f8"),
("step", "u4"),
("max_step", "u4"),
("in_service", "bool")],
"ext_grid": [("name", dtype(object)),
("bus", "u4"),
("vm_pu", "f8"),
("va_degree", "f8"),
("in_service", 'bool')],
"line": [("name", dtype(object)),
("std_type", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("length_km", "f8"),
("r_ohm_per_km", "f8"),
("x_ohm_per_km", "f8"),
("c_nf_per_km", "f8"),
("max_i_ka", "f8"),
("df", "f8"),
("parallel", "u4"),
("type", dtype(object)),
("in_service", 'bool')],
"trafo": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("lv_bus", "u4"),
("sn_kva", "f8"),
("vn_hv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_percent", "f8"),
("vscr_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_st_degree", "f8"),
("tp_pos", "i4"),
("parallel", "u4"),
("df", "f8"),
("in_service", 'bool')],
"trafo3w": [("name", dtype(object)),
("std_type", dtype(object)),
("hv_bus", "u4"),
("mv_bus", "u4"),
("lv_bus", "u4"),
("sn_hv_kva", "u8"),
("sn_mv_kva", "u8"),
("sn_lv_kva", "u8"),
("vn_hv_kv", "f8"),
("vn_mv_kv", "f8"),
("vn_lv_kv", "f8"),
("vsc_hv_percent", "f8"),
("vsc_mv_percent", "f8"),
("vsc_lv_percent", "f8"),
("vscr_hv_percent", "f8"),
("vscr_mv_percent", "f8"),
("vscr_lv_percent", "f8"),
("pfe_kw", "f8"),
("i0_percent", "f8"),
("shift_mv_degree", "f8"),
("shift_lv_degree", "f8"),
("tp_side", dtype(object)),
("tp_mid", "i4"),
("tp_min", "i4"),
("tp_max", "i4"),
("tp_st_percent", "f8"),
("tp_pos", "i4"),
("in_service", 'bool')],
"impedance": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("rft_pu", "f8"),
("xft_pu", "f8"),
("rtf_pu", "f8"),
("xtf_pu", "f8"),
("sn_kva", "f8"),
("in_service", 'bool')],
"dcline": [("name", dtype(object)),
("from_bus", "u4"),
("to_bus", "u4"),
("p_kw", "f8"),
("loss_percent", 'f8'),
("loss_kw", 'f8'),
("vm_from_pu", "f8"),
("vm_to_pu", "f8"),
("max_p_kw", "f8"),
("min_q_from_kvar", "f8"),
("min_q_to_kvar", "f8"),
("max_q_from_kvar", "f8"),
("max_q_to_kvar", "f8"),
("in_service", 'bool')],
"ward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("in_service", "bool")],
"xward": [("name", dtype(object)),
("bus", "u4"),
("ps_kw", "f8"),
("qs_kvar", "f8"),
("qz_kvar", "f8"),
("pz_kw", "f8"),
("r_ohm", "f8"),
("x_ohm", "f8"),
("vm_pu", "f8"),
("in_service", "bool")],
"measurement": [("name", dtype(object)),
("type", dtype(object)),
("element_type", dtype(object)),
("value", "f8"),
("std_dev", "f8"),
("bus", "u4"),
("element", dtype(object))],
"piecewise_linear_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("p", dtype(object)),
("f", dtype(object))],
"polynomial_cost": [("type", dtype(object)),
("element", dtype(object)),
("element_type", dtype(object)),
("c", dtype(object))],
# geodata
"line_geodata": [("coords", dtype(object))],
"bus_geodata": [("x", "f8"), ("y", "f8")],
# result tables
"_empty_res_bus": [("vm_pu", "f8"),
("va_degree", "f8"),
("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_ext_grid": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_line": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8"),
("i_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_trafo3w": [("p_hv_kw", "f8"),
("q_hv_kvar", "f8"),
("p_mv_kw", "f8"),
("q_mv_kvar", "f8"),
("p_lv_kw", "f8"),
("q_lv_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_hv_ka", "f8"),
("i_mv_ka", "f8"),
("i_lv_ka", "f8"),
("loading_percent", "f8")],
"_empty_res_load": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_sgen": [("p_kw", "f8"),
("q_kvar", "f8")],
"_empty_res_gen": [("p_kw", "f8"),
("q_kvar", "f8"),
("va_degree", "f8"),
("vm_pu", "f8")],
"_empty_res_shunt": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_impedance": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("ql_kvar", "f8"),
("i_from_ka", "f8"),
("i_to_ka", "f8")],
"_empty_res_dcline": [("p_from_kw", "f8"),
("q_from_kvar", "f8"),
("p_to_kw", "f8"),
("q_to_kvar", "f8"),
("pl_kw", "f8"),
("vm_from_pu", "f8"),
("va_from_degree", "f8"),
("vm_to_pu", "f8"),
("va_to_degree", "f8")],
"_empty_res_ward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
"_empty_res_xward": [("p_kw", "f8"),
("q_kvar", "f8"),
("vm_pu", "f8")],
# internal
"_ppc": None,
"_is_elements": None,
"_pd2ppc_lookups": {"bus": None,
"ext_grid": None,
"gen": None},
"version": float(__version__[:3]),
"converged": False,
"name": name,
"f_hz": f_hz,
"sn_kva": sn_kva
})
for s in net:
if isinstance(net[s], list):
net[s] = pd.DataFrame(zeros(0, dtype=net[s]), index=[])
add_basic_std_types(net)
reset_results(net)
net['user_pf_options'] = dict()
return net
def create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b",
zone=None, in_service=True, max_vm_pu=nan,
min_vm_pu=nan, **kwargs):
"""create_bus(net, vn_kv, name=None, index=None, geodata=None, type="b", \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds one bus in table net["bus"].
Busses are the nodes of the network that all other elements connect to.
INPUT:
**net** (pandapowerNet) - The pandapower network in which the element is created
OPTIONAL:
**name** (string, default None) - the name for this bus
**index** (int, default None) - Force a specified ID if it is available. If None, the \
index one higher than the highest already existing index is selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
        **type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_bus(net, name = "bus1")
"""
if index and index in net["bus"].index:
raise UserWarning("A bus with index %s already exists" % index)
if index is None:
index = get_free_id(net["bus"])
# store dtypes
dtypes = net.bus.dtypes
net.bus.loc[index, ["name", "vn_kv", "type", "zone", "in_service"]] = \
[name, vn_kv, type, zone, bool(in_service)]
# and preserve dtypes
_preserve_dtypes(net.bus, dtypes)
if geodata is not None:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[index, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
def create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None,
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan):
"""create_buses(net, nr_buses, vn_kv, index=None, name=None, type="b", geodata=None, \
zone=None, in_service=True, max_vm_pu=nan, min_vm_pu=nan)
Adds several buses in table net["bus"] at once.
Busses are the nodal points of the network that all other elements connect to.
Input:
**net** (pandapowerNet) - The pandapower network in which the element is created
**nr_buses** (int) - The number of buses that is created
OPTIONAL:
**name** (string, default None) - the name for this bus
        **index** (int, default None) - Force specified IDs if available. If None, the indices \
higher than the highest already existing index are selected.
**vn_kv** (float) - The grid voltage level.
**geodata** ((x,y)-tuple, default None) - coordinates used for plotting
        **type** (string, default "b") - Type of the bus. "n" - auxiliary node,
"b" - busbar, "m" - muff
**zone** (string, None) - grid region
**in_service** (boolean) - True for in_service or False for out of service
**max_vm_pu** (float, NAN) - Maximum bus voltage in p.u. - necessary for OPF
**min_vm_pu** (float, NAN) - Minimum bus voltage in p.u. - necessary for OPF
OUTPUT:
        **index** (int) - The unique IDs of the created elements
EXAMPLE:
        create_buses(net, nr_buses=2, vn_kv=20.)
"""
if index:
for idx in index:
if idx in net.bus.index:
raise UserWarning("A bus with index %s already exists" % index)
else:
bid = get_free_id(net["bus"])
index = arange(bid, bid + nr_buses, 1)
# TODO: not needed when concating anyways?
# store dtypes
# dtypes = net.bus.dtypes
dd = pd.DataFrame(index=index, columns=net.bus.columns)
dd["vn_kv"] = vn_kv
dd["type"] = type
dd["zone"] = zone
dd["in_service"] = in_service
dd["name"] = name
net["bus"] = pd.concat([net["bus"], dd], axis=0).reindex_axis(net["bus"].columns, axis=1)
# and preserve dtypes
# _preserve_dtypes(net.bus, dtypes)
if geodata:
if len(geodata) != 2:
raise UserWarning("geodata must be given as (x, y) tupel")
net["bus_geodata"].loc[bid, ["x", "y"]] = geodata
if not isnan(min_vm_pu):
if "min_vm_pu" not in net.bus.columns:
net.bus.loc[:, "min_vm_pu"] = pd.Series()
net.bus.loc[index, "min_vm_pu"] = float(min_vm_pu)
if not isnan(max_vm_pu):
if "max_vm_pu" not in net.bus.columns:
net.bus.loc[:, "max_vm_pu"] = pd.Series()
net.bus.loc[index, "max_vm_pu"] = float(max_vm_pu)
return index
def create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan,
name=None, scaling=1., index=None,
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan):
"""create_load(net, bus, p_kw, q_kvar=0, const_z_percent=0, const_i_percent=0, sn_kva=nan, \
name=None, scaling=1., index=None, \
in_service=True, type=None, max_p_kw=nan, min_p_kw=nan, max_q_kvar=nan, \
min_q_kvar=nan, controllable=nan)
Adds one load in table net["load"].
All loads are modelled in the consumer system, meaning load is positive and generation is
negative active power. Please pay attention to the correct signing of the reactive power as
well.
INPUT:
**net** - The net within this load should be created
**bus** (int) - The bus id to which the load is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the load
        - positive value -> load
- negative value -> generation
**q_kvar** (float, default 0) - The reactive power of the load
**const_z_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant impedance load at rated voltage
**const_i_percent** (float, default 0) - percentage of p_kw and q_kvar that will be \
associated to constant current load at rated voltage
**sn_kva** (float, default None) - Nominal power of the load
**name** (string, default None) - The name for this load
        **scaling** (float, default 1.) - An optional custom scaling factor
**type** (string, None) - type variable to classify the load
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
**in_service** (boolean) - True for in_service or False for out of service
**max_p_kw** (float, default NaN) - Maximum active power load - necessary for controllable \
loads in for OPF
**min_p_kw** (float, default NaN) - Minimum active power load - necessary for controllable \
loads in for OPF
**max_q_kvar** (float, default NaN) - Maximum reactive power load - necessary for \
controllable loads in for OPF
**min_q_kvar** (float, default NaN) - Minimum reactive power load - necessary for \
controllable loads in OPF
**controllable** (boolean, default NaN) - States, whether a load is controllable or not. \
Only respected for OPF
OUTPUT:
**index** (int) - The unique ID of the created element
EXAMPLE:
create_load(net, bus=0, p_kw=10., q_kvar=2.)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["load"])
if index in net["load"].index:
raise UserWarning("A load with the id %s already exists" % id)
# store dtypes
dtypes = net.load.dtypes
net.load.loc[index, ["name", "bus", "p_kw", "const_z_percent", "const_i_percent", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, const_z_percent, const_i_percent, scaling, q_kvar, sn_kva,
bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.load, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.load.columns:
net.load.loc[:, "min_p_kw"] = pd.Series()
net.load.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.load.columns:
net.load.loc[:, "max_p_kw"] = pd.Series()
net.load.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.load.columns:
net.load.loc[:, "min_q_kvar"] = pd.Series()
net.load.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.load.columns:
net.load.loc[:, "max_q_kvar"] = pd.Series()
net.load.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.load.columns:
net.load.loc[:, "controllable"] = pd.Series()
net.load.loc[index, "controllable"] = bool(controllable)
else:
if "controllable" in net.load.columns:
net.load.loc[index, "controllable"] = False
return index
def create_load_from_cosphi(net, bus, sn_kva, cos_phi, mode, **kwargs):
"""
Creates a load element from rated power and power factor cos(phi).
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the load is connected
**sn_kva** (float) - rated power of the load
**cos_phi** (float) - power factor cos_phi
**mode** (str) - "ind" for inductive or "cap" for capacitive behaviour
**kwargs are passed on to the create_load function
OUTPUT:
**index** (int) - The unique ID of the created load
All elements are modeled from a consumer point of view. Active power will therefore always be
positive, reactive power will be negative for inductive behaviour and positive for capacitive
behaviour.
"""
from pandapower.toolbox import pq_from_cosphi
p_kw, q_kvar = pq_from_cosphi(sn_kva, cos_phi, qmode=mode, pmode="load")
return create_load(net, bus, sn_kva=sn_kva, p_kw=p_kw, q_kvar=q_kvar, **kwargs)
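# Illustrative usage (added sketch, not part of the original module): a 400 kVA
# load at 0.95 inductive power factor attached to a 0.4 kV bus.
#   net = create_empty_network()
#   b = create_bus(net, vn_kv=0.4)
#   create_load_from_cosphi(net, b, sn_kva=400, cos_phi=0.95, mode="ind")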
def create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=nan, name=None, index=None,
scaling=1., type=None, in_service=True, max_p_kw=nan, min_p_kw=nan,
max_q_kvar=nan, min_q_kvar=nan, controllable=nan, k=nan, rx=nan):
"""create_sgen(net, bus, p_kw, q_kvar=0, sn_kva=nan, name=None, index=None, \
scaling=1., type=None, in_service=True, max_p_kw=nan, min_p_kw=nan, \
max_q_kvar=nan, min_q_kvar=nan, controllable=nan, k=nan, rx=nan)
Adds one static generator in table net["sgen"].
Static generators are modelled as negative PQ loads. This element is used to model generators
with a constant active and reactive power feed-in. If you want to model a voltage controlled
generator, use the generator element instead.
All elements in the grid are modelled in the consumer system, including generators!
If you want to model the generation of power, you have to assign a negative active power
to the generator. Please pay attention to the correct signing of the
reactive power as well.
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the static generator is connected
**p_kw** (float) - The real power of the static generator (negative for generation!)
OPTIONAL:
**q_kvar** (float, default 0) - The reactive power of the sgen
**sn_kva** (float, default None) - Nominal power of the sgen
**name** (string, default None) - The name for this sgen
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
        **scaling** (float, 1.) - An optional custom scaling factor
**type** (string, None) - type variable to classify the static generator
**in_service** (boolean) - True for in_service or False for out of service
**max_p_kw** (float, NaN) - Maximum active power injection - necessary for \
controllable sgens in OPF
**min_p_kw** (float, NaN) - Minimum active power injection - necessary for \
controllable sgens in OPF
**max_q_kvar** (float, NaN) - Maximum reactive power injection - necessary for \
controllable sgens in OPF
**min_q_kvar** (float, NaN) - Minimum reactive power injection - necessary for \
controllable sgens in OPF
**controllable** (bool, NaN) - Whether this generator is controllable by the optimal
powerflow
**k** (float, NaN) - Ratio of nominal current to short circuit current
**rx** (float, NaN) - R/X ratio for short circuit impedance. Only relevant if type is specified as motor so that sgen is treated as asynchronous motor
OUTPUT:
**index** (int) - The unique ID of the created sgen
EXAMPLE:
create_sgen(net, 1, p_kw = -120)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if index is None:
index = get_free_id(net["sgen"])
if index in net["sgen"].index:
raise UserWarning("A static generator with the id %s already exists" % index)
# store dtypes
dtypes = net.sgen.dtypes
net.sgen.loc[index, ["name", "bus", "p_kw", "scaling",
"q_kvar", "sn_kva", "in_service", "type"]] = \
[name, bus, p_kw, scaling, q_kvar, sn_kva, bool(in_service), type]
# and preserve dtypes
_preserve_dtypes(net.sgen, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "min_p_kw"] = pd.Series()
net.sgen.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.sgen.columns:
net.sgen.loc[:, "max_p_kw"] = pd.Series()
net.sgen.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "min_q_kvar"] = pd.Series()
net.sgen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.sgen.columns:
net.sgen.loc[:, "max_q_kvar"] = pd.Series()
net.sgen.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.sgen.columns:
net.sgen.loc[:, "controllable"] = pd.Series()
net.sgen.loc[index, "controllable"] = bool(controllable)
else:
if "controllable" in net.sgen.columns:
net.sgen.loc[index, "controllable"] = False
if not isnan(k):
if "k" not in net.sgen.columns:
net.sgen.loc[:, "k"] = pd.Series()
net.sgen.loc[index, "k"] = float(k)
if not isnan(rx):
if "rx" not in net.sgen.columns:
net.sgen.loc[:, "rx"] = pd.Series()
net.sgen.loc[index, "rx"] = float(rx)
return index
def create_sgen_from_cosphi(net, bus, sn_kva, cos_phi, mode, **kwargs):
"""
Creates an sgen element from rated power and power factor cos(phi).
INPUT:
**net** - The net within this static generator should be created
**bus** (int) - The bus id to which the static generator is connected
**sn_kva** (float) - rated power of the generator
**cos_phi** (float) - power factor cos_phi
**mode** (str) - "ind" for inductive or "cap" for capacitive behaviour
OUTPUT:
**index** (int) - The unique ID of the created sgen
All elements including generators are modeled from a consumer point of view. Active power
will therefore always be negative, reactive power will be negative for inductive behaviour and
positive for capacitive behaviour.
"""
from pandapower.toolbox import pq_from_cosphi
p_kw, q_kvar = pq_from_cosphi(sn_kva, cos_phi, qmode=mode, pmode="gen")
return create_sgen(net, bus, sn_kva=sn_kva, p_kw=p_kw, q_kvar=q_kvar, **kwargs)
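# Illustrative usage (added sketch, not part of the original module): a 500 kVA
# PV unit at cos(phi)=0.9 capacitive; the consumer sign convention described
# above yields a negative p_kw.
#   create_sgen_from_cosphi(net, b, sn_kva=500, cos_phi=0.9, mode="cap")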
def create_gen(net, bus, p_kw, vm_pu=1., sn_kva=nan, name=None, index=None, max_q_kvar=nan,
min_q_kvar=nan, min_p_kw=nan, max_p_kw=nan, scaling=1., type=None,
controllable=nan, vn_kv=nan, xdss=nan, rdss=nan, cos_phi=nan, in_service=True):
"""create_gen(net, bus, p_kw, vm_pu=1., sn_kva=nan, name=None, index=None, max_q_kvar=nan, \
min_q_kvar=nan, min_p_kw=nan, max_p_kw=nan, scaling=1., type=None, \
controllable=nan, vn_kv=nan, xdss=nan, rdss=nan, cos_phi=nan, in_service=True)
Adds a generator to the network.
Generators are always modelled as voltage controlled PV nodes, which is why the input parameter
is active power and a voltage set point. If you want to model a generator as PQ load with fixed
reactive power and variable voltage, please use a static generator instead.
INPUT:
**net** - The net within this generator should be created
**bus** (int) - The bus id to which the generator is connected
OPTIONAL:
**p_kw** (float, default 0) - The real power of the generator (negative for generation!)
        **vm_pu** (float, default 1.0) - The voltage set point of the generator.
**sn_kva** (float, None) - Nominal power of the generator
**name** (string, None) - The name for this generator
**index** (int, None) - Force a specified ID if it is available. If None, the index one \
higher than the highest already existing index is selected.
**scaling** (float, 1.0) - scaling factor which for the active power of the generator
**type** (string, None) - type variable to classify generators
**controllable** (bool, NaN) - Whether this generator is controllable by the optimal
powerflow
**vn_kv** (float, NaN) - Rated voltage of the generator for short-circuit calculation
**xdss** (float, NaN) - Subtransient generator reactance for short-circuit calculation
**rdss** (float, NaN) - Subtransient generator resistance for short-circuit calculation
**cos_phi** (float, NaN) - Rated cosine phi of the generator for short-circuit calculation
**in_service** (bool, True) - True for in_service or False for out of service
**max_p_kw** (float, default NaN) - Maximum active power injection - necessary for OPF
**min_p_kw** (float, default NaN) - Minimum active power injection - necessary for OPF
**max_q_kvar** (float, default NaN) - Maximum reactive power injection - necessary for OPF
**min_q_kvar** (float, default NaN) - Minimum reactive power injection - necessary for OPF
OUTPUT:
**index** (int) - The unique ID of the created generator
EXAMPLE:
create_gen(net, 1, p_kw = -120, vm_pu = 1.02)
"""
if bus not in net["bus"].index.values:
raise UserWarning("Cannot attach to bus %s, bus does not exist" % bus)
if bus in net.ext_grid.bus.values:
raise UserWarning(
"There is already an external grid at bus %u, thus no other voltage " % bus +
"controlling element (ext_grid, gen) is allowed at this bus.")
# if bus in net.gen.bus.values:
# raise UserWarning(
# "There is already a generator at bus %u, only one voltage controlling " % bus +
# "element (ext_grid, gen) is allowed per bus.")
if index is None:
index = get_free_id(net["gen"])
if index in net["gen"].index:
raise UserWarning("A generator with the id %s already exists" % index)
# store dtypes
dtypes = net.gen.dtypes
net.gen.loc[index, ["name", "bus", "p_kw", "vm_pu", "sn_kva", "type", "in_service",
"scaling"]] = [name, bus, p_kw, vm_pu, sn_kva, type, bool(in_service),
scaling]
# and preserve dtypes
_preserve_dtypes(net.gen, dtypes)
if not isnan(min_p_kw):
if "min_p_kw" not in net.gen.columns:
net.gen.loc[:, "min_p_kw"] = pd.Series()
net.gen.loc[index, "min_p_kw"] = float(min_p_kw)
if not isnan(max_p_kw):
if "max_p_kw" not in net.gen.columns:
net.gen.loc[:, "max_p_kw"] = pd.Series()
net.gen.loc[index, "max_p_kw"] = float(max_p_kw)
if not isnan(min_q_kvar):
if "min_q_kvar" not in net.gen.columns:
net.gen.loc[:, "min_q_kvar"] = pd.Series()
net.gen.loc[index, "min_q_kvar"] = float(min_q_kvar)
if not isnan(max_q_kvar):
if "max_q_kvar" not in net.gen.columns:
net.gen.loc[:, "max_q_kvar"] = pd.Series()
net.gen.loc[index, "max_q_kvar"] = float(max_q_kvar)
if not isnan(controllable):
if "controllable" not in net.gen.columns:
net.gen.loc[:, "controllable"] = pd.Series(False)
net.gen.loc[index, "controllable"] = bool(controllable)
elif "controllable" in net.gen.columns:
net.gen.loc[index, "controllable"] = False
if not isnan(vn_kv):
if "vn_kv" not in net.gen.columns:
net.gen.loc[:, "vn_kv"] = pd.Series()
net.gen.loc[index, "vn_kv"] = float(vn_kv)
if not isnan(xdss):
if "xdss" not in net.gen.columns:
net.gen.loc[:, "xdss"] = pd.Series()
net.gen.loc[index, "xdss"] = float(xdss)
if not isnan(rdss):
if "rdss" not in net.gen.columns:
net.gen.loc[:, "rdss"] = | pd.Series() | pandas.Series |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, sparse=sparse)
expected = DataFrame(
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
dtype=np.uint8,
)
if sparse:
expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": SparseArray([0, 1, 0], dtype="uint8"),
"B_b": SparseArray([1, 1, 0], dtype="uint8"),
"B_c": SparseArray([0, 0, 1], dtype="uint8"),
}
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
}
)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ["from_A", "from_B"]
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected = expected[["C"] + cols]
typ = SparseArray if sparse else Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix="bad", sparse=sparse)
bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"]
expected = DataFrame(
[[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]],
columns=["C"] + bad_columns,
dtype=np.uint8,
)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat(
[
Series([1, 2, 3], name="C"),
Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"),
Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"),
Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"),
],
axis=1,
)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse)
expected = DataFrame(
{
"B": ["b", "b", "c"],
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
if sparse:
cols = ["from_A_a", "from_A_b"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep="..", sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"A..a": [1, 0, 1],
"A..b": [0, 1, 0],
"B..b": [1, 1, 0],
"B..c": [0, 0, 1],
},
dtype=np.uint8,
)
expected[["C"]] = df[["C"]]
expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]]
if sparse:
cols = ["A..a", "A..b", "B..b", "B..c"]
expected[cols] = expected[cols].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse)
expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"})
tm.assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=["too few"], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=["bad"], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {"A": "from_A", "B": "from_B"}
df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame(
{
"C": [1, 2, 3],
"from_A_a": [1, 0, 1],
"from_A_b": [0, 1, 0],
"from_B_b": [1, 1, 0],
"from_B_c": [0, 0, 1],
}
)
columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"]
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].astype(SparseDtype("uint8", 0))
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index(
axis=1
)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3, np.nan],
"A_a": arr([1, 0, 1, 0], dtype=typ),
"A_b": arr([0, 1, 0, 0], dtype=typ),
"A_nan": arr([0, 0, 0, 1], dtype=typ),
"B_b": arr([1, 1, 0, 0], dtype=typ),
"B_c": arr([0, 0, 1, 0], dtype=typ),
"B_nan": arr([0, 0, 0, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]]
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df["cat"] = Categorical(["x", "y", "y"])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame(
{
"C": [1, 2, 3],
"A_a": arr([1, 0, 1], dtype=typ),
"A_b": arr([0, 1, 0], dtype=typ),
"B_b": arr([1, 1, 0], dtype=typ),
"B_c": arr([0, 0, 1], dtype=typ),
"cat_x": arr([1, 0, 0], dtype=typ),
"cat_y": arr([0, 1, 1], dtype=typ),
}
).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"get_dummies_kwargs,expected",
[
(
{"data": DataFrame({"ä": ["a"]})},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["ä"]})},
DataFrame({"x_ä": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix": "ä"},
DataFrame({"ä_a": [1]}, dtype=np.uint8),
),
(
{"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"},
DataFrame({"xäa": [1]}, dtype=np.uint8),
),
],
)
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(result, expected)
#import seaborn as sns
#import matplotlib.pyplot as plt
#import matplotlib.axes as ax
#import sklearn
#from sklearn.linear_model import LinearRegression
#from sklearn import datasets, linear_model
#from scipy.optimize import curve_fit
#import os
#import collections
#from statsmodels.stats.outliers_influence import summary_table
from pandas.tseries.offsets import *
#import sklearn
#from sklearn import datasets, linear_model
#from sklearn import datasets, linear_model
#import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
import xlwings as xw
def world():
DataTrain = GetDataFrame("RawData_Train",1700,2000)
DataTrain = filterData(DataTrain, "09/04/2019", "09/04/2019", "14:30", "21:00")
DataValid = GetDataFrame("RawData_Train2",1700,2000)
DataValid = filterData(DataValid, "09/05/2019", "09/05/2019", "14:30", "20:59")
###
allData = pd.concat([DataTrain, DataValid]).dropna()
###
#DataReturnTrain = getReturnTrain(DataTrain, 396+10, "MSFT US Equity-Open")
#DataReturnValid = getReturnTrain(DataValid, 396+10, "MSFT US Equity-Open")
lassoDF = getLassoValidDF(allData, 300+10, "MSFT US Equity-Open",0.01)
peerDF = getCoefDF(allData,1, 300+10, "MSFT US Equity-Open",0.01)
peerDF = getContr(peerDF).sort_values(by=["Contribution [%]"], ascending=False)
outsht = xw.Book.caller().sheets["Results"]
outsht.range('A1:F500').value = lassoDF
peersht = xw.Book.caller().sheets["Peers"]
peersht.range('A1:C100').value = peerDF
#
#peersht.range('A1:B100').number_format = '0.0'
#test = xw.Book.caller().sheets["Sheet1"]
#test.range("A2").value = 200
###############################################################################
def showLivePrediction(compName, alpha0):
# prepare training + prediction data set
trainData = prepDataSet()
allData = createNewDF(trainData).dropna(axis=1,how="all")
#get parameters
mainsht = xw.Book.caller().sheets["Main"]
compName0 = compName+'-Open'
#alpha0 = mainsht.range('K2').value
# create the Lasso prediction DataFrame
lassoDF = getLassoValidDF(allData, len(trainData), compName0,alpha0)
# print the dataframe
outsht = xw.Book.caller().sheets["Results"]
outsht.range('A1:F500').value = ""
outsht.range('A1:F500').value = lassoDF
###############################################################################
def createNewDF(trainDF):
# get parameters
mainsht = xw.Book.caller().sheets["Main"]
dateInterest = str(mainsht.range('I12').value[1:-1])
endTimeInterest = str(mainsht.range('I14').value[1:-1])
trainData = prepDataSet().dropna(axis=1,how="all")
liveData0 = GetDataFrame("DataValid",1700,2000)
liveData = filterData(liveData0, dateInterest, dateInterest, endTimeInterest, "21:30")
newDF = pd.concat([trainData, liveData],axis =0).fillna(value=0)
return newDF
###############################################################################
def myPred():
mainsht = xw.Book.caller().sheets["Main"]
dateInterest = str(mainsht.range('I12').value[1:-1])
endTimeInterest = str(mainsht.range('I14').value[1:-1])
liveData0 = GetDataFrame("DataValid",1700,2000).fillna(value=0)
liveData = filterData(liveData0, dateInterest, dateInterest, endTimeInterest, "21:30")
coefListDF = mainsht.range("L1:M100").options(pd.DataFrame, index=True, numbers=float).value
coefListDF = coefListDF[coefListDF.index.isna()==False]
compName = mainsht.range('K1').value+'-Open'
selCompList = coefListDF.index.tolist()
datalist = np.array(liveData[compName]-liveData[compName][0])
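# basket value at each timestamp: the weighted sum of the selected peers'
# prices, using the coefficients read back from the Main sheet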
tes=np.array([np.sum([liveData[selCompList[i]][j]*coefListDF["Coef"][i] for i in range(len(selCompList)-1)]) for j in range(len(liveData))])
resDF = pd.DataFrame()
resDF["Data"] = datalist
resDF["Basket"] = tes-tes[0]
test = xw.Book.caller().sheets["Sheet1"]
test.range('A1:B10').value = resDF
###############################################################################
def GetPeerParameters(ticker, alpha0):
# get the settings from the Main-tab.
# initialise the Excel sheet
mainsht = xw.Book.caller().sheets["Main"]
# load the desired Name of the stock
#compName0 = mainsht.range('K1').value+'-Open'
compName = str(ticker)+'-Open'
# load the predefined alpha value
#alpha = mainsht.range('K2').value
# clear the output space
mainsht.range('L1:N200').value = ""
# load the master DataFrame and drop columns that don't contain any values
allData = prepDataSet().fillna(method="ffill").fillna(method="bfill").dropna(axis=1,how="all")
# collect Lasso training output
peerDFsummary = getCoefDF(allData,1, len(allData), compName, alpha0)
# calculating the contribution of the peers
peerDF = getContr(peerDFsummary[0]).sort_values(by=["Contribution [%]"], ascending=False)
# Output the coefficient DataFrame into the space cleared above
mainsht.range('L1:N100').value = peerDF
# Output the training data calculated by the Lasso model
# initialise the tab for training results
trainressht = xw.Book.caller().sheets["Result_Train"]
# clear the space
trainressht.range('A1:E3000').value = ""
# Saving
trainressht.range('A1:E10').value = peerDFsummary[1]
return peerDF.index
# =IFERROR(VLOOKUP(K1,Sheet1!A2:B6,2,FALSE),0.01)
###############################################################################
def prepDataSet():
# get the settings from the Main-tab.
mainsht = xw.Book.caller().sheets["Main"]
startDate = str(mainsht.range('I1').value[1:-1])
endDate = str(mainsht.range('I2').value[1:-1])
startTime = str(mainsht.range('I4').value[1:-1])
endTime = str(mainsht.range('I5').value[1:-1])
dateInterest = str(mainsht.range('I12').value[1:-1])
stTimeInterest = str(mainsht.range('I13').value[1:-1])
endTimeInterest = str(mainsht.range('I14').value[1:-1])
# load RawData_Train tab into DataFrame
DataTrain = GetDataFrame("RawData_Train",5000,2000)
# xlwings doesn't know when to stop loading, so I load more rows than needed
# into the DataFrame. To avoid trouble with the data filling later, I delete
# every row that doesn't have an index.
DataTrain = DataTrain[DataTrain.index.isna() == False]
# the data can span multiple days. I only keep the hours when the market is
# open, then apply data filling to empty cells.
DataTrain = filterData(DataTrain, startDate, dateInterest, startTime, endTime)
# Load RawData_Train2 into dataframe
DataTrain2 = GetDataFrame("RawData_Train2",5000,2000)
DataTrain2 = DataTrain2[DataTrain2.index.isna() == False]
DataTrain2 = filterData(DataTrain2, dateInterest, dateInterest, stTimeInterest,endTimeInterest)
# combine both DataFrames into a single one
allData = pd.concat([DataTrain, DataTrain2])
return allData
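# Hypothetical sketch of the GetDataFrame helper used throughout this module
# (its real definition is not shown in this excerpt): it reads a fixed-size
# block of the named worksheet into a DataFrame, which is why callers drop the
# rows whose index is NaN afterwards. The exact range handling is an assumption.
def _get_dataframe_sketch(sheet_name, n_rows, n_cols):
    sheet = xw.Book.caller().sheets[sheet_name]
    block = sheet.range((1, 1), (n_rows, n_cols))
    return block.options(pd.DataFrame, index=True, header=True).value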
import pandas as pd
import numpy as np
import os
import json
DATA_DIR = "data/"
FILE_NAME = "data.csv"
FINAL_DATA = "rearranged_data.xlsx"
DATA_SPECS = "data_specs.json"
with open(DATA_SPECS, 'r') as f:
DATA_SPECS_DICT = json.load(f)
# Load data
df = pd.read_csv(os.path.join(DATA_DIR, FILE_NAME), delimiter=";")
# function to copy serial
def copy_serial(row):
if not pd.isnull(row["ZG04"]):
row["SERIAL"] = row["ZG04"]
elif not pd.isnull(row["ZG05"]):
row["SERIAL"] = row["ZG05"]
return row
# move serial to serial from w01
df = df.apply(lambda row: copy_serial(row), axis=1)
# Drop lines where we have no serial number
df = df[~pd.isnull(df["SERIAL"])]
import pandas as pd
import scipy.signal as scisig
import os
import numpy as np
def get_user_input(prompt):
try:
return raw_input(prompt)
except NameError:
return input(prompt)
def getInputLoadFile():
'''Asks user for type of file and file path. Loads corresponding data.
OUTPUT:
data: DataFrame, index is a list of timestamps at 8Hz, columns include
AccelZ, AccelY, AccelX, Temp, EDA, filtered_eda
'''
print("Please enter information about your EDA file... ")
dataType = 'e4'
if dataType=='q':
filepath = get_user_input("\tFile path: ")
filepath_confirm = filepath
data = loadData_Qsensor(filepath)
elif dataType=='e4':
filepath = get_user_input("\tPath to E4 directory: ")
filepath_confirm = os.path.join(filepath,"EDA.csv")
data = loadData_E4(filepath)
elif dataType=='shimmer':
filepath = get_user_input("\tFile path: ")
filepath_confirm = filepath
data = loadData_shimmer(filepath)
elif dataType=="misc":
filepath = get_user_input("\tFile path: ")
filepath_confirm = filepath
data = loadData_misc(filepath)
else:
print("Error: not a valid file choice")
return data, filepath_confirm
def getOutputPath():
print("")
print("Where would you like to save the computed output file?")
outfile = get_user_input('\tFile name: ')
outputPath = get_user_input('\tFile directory (./ for this directory): ')
fullOutputPath = os.path.join(outputPath,outfile)
if fullOutputPath[-4:] != '.csv':
fullOutputPath = fullOutputPath+'.csv'
return fullOutputPath
def loadData_Qsensor(filepath):
'''
This function loads the Q sensor data, uses a lowpass butterworth filter on the EDA signal
Note: currently assumes sampling rate of 8hz, 16hz, 32hz; if sampling rate is 16hz or 32hz the signal is downsampled
INPUT:
filepath: string, path to input file
OUTPUT:
data: DataFrame, index is a list of timestamps at 8Hz, columns include AccelZ, AccelY, AccelX, Temp, EDA, filtered_eda
'''
# Get header info
try:
header_info = pd.io.parsers.read_csv(filepath, nrows=5)
except IOError:
print("Error!! Couldn't load file, make sure the filepath is correct and you are using a csv from the q sensor software\n\n")
return
# Get sample rate
sampleRate = int((header_info.iloc[3,0]).split(":")[1].strip())
# Get the raw data
data = pd.io.parsers.read_csv(filepath, skiprows=7)
data = data.reset_index()
# Reset the index to be a time and reset the column headers
data.columns = ['AccelZ','AccelY','AccelX','Battery','Temp','EDA']
# Get Start Time
startTime = pd.to_datetime(header_info.iloc[4,0][12:-10])
# Make sure data has a sample rate of 8Hz
data = interpolateDataTo8Hz(data,sampleRate,startTime)
# Remove Battery Column
data = data[['AccelZ','AccelY','AccelX','Temp','EDA']]
# Get the filtered data using a low-pass butterworth filter (cutoff:1hz, fs:8hz, order:6)
data['filtered_eda'] = butter_lowpass_filter(data['EDA'], 1.0, 8, 6)
return data
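# A minimal sketch of the low-pass Butterworth helper used above. The real
# butter_lowpass_filter is assumed to be defined elsewhere in this module, so
# this stand-in (note the underscore name) only illustrates the expected
# behaviour, e.g. a 6th-order filter with a 1 Hz cutoff for data sampled at 8 Hz.
def _butter_lowpass_filter_sketch(data, cutoff, fs, order=6):
    # normalize the cutoff frequency by the Nyquist frequency (fs / 2)
    normalized_cutoff = cutoff / (0.5 * fs)
    b, a = scisig.butter(order, normalized_cutoff, btype='low', analog=False)
    # apply the filter along the 1-D signal
    return scisig.lfilter(b, a, data)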
def _loadSingleFile_E4(filepath,list_of_columns, expected_sample_rate,freq):
# Load data
data = pd.read_csv(filepath)
# Get the startTime and sample rate
startTime = pd.to_datetime(float(data.columns.values[0]),unit="s")
sampleRate = float(data.iloc[0][0])
data = data[data.index!=0]
data.index = data.index-1
# Reset the data frame assuming expected_sample_rate
data.columns = list_of_columns
if sampleRate != expected_sample_rate:
print('ERROR, NOT SAMPLED AT {0}HZ. PROBLEMS WILL OCCUR\n'.format(expected_sample_rate))
# Make sure data has a sample rate of 8Hz
data = interpolateDataTo8Hz(data,sampleRate,startTime)
return data
def loadData_E4(filepath):
# Load EDA data
eda_data = _loadSingleFile_E4(os.path.join(filepath,'EDA.csv'),["EDA"],4,"250L")
# Get the filtered data using a low-pass butterworth filter (cutoff:1hz, fs:8hz, order:6)
eda_data['filtered_eda'] = butter_lowpass_filter(eda_data['EDA'], 1.0, 8, 6)
# Load ACC data
acc_data = _loadSingleFile_E4(os.path.join(filepath,'ACC.csv'),["AccelX","AccelY","AccelZ"],32,"31250U")
# Scale the accelerometer to +/-2g
acc_data[["AccelX","AccelY","AccelZ"]] = acc_data[["AccelX","AccelY","AccelZ"]]/64.0
# Load Temperature data
temperature_data = _loadSingleFile_E4(os.path.join(filepath,'TEMP.csv'),["Temp"],4,"250L")
data = eda_data.join(acc_data, how='outer')
data = data.join(temperature_data, how='outer')
# E4 sometimes records different length files - adjust as necessary
min_length = min(len(acc_data), len(eda_data), len(temperature_data))
return data[:min_length]
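# Hypothetical usage of the E4 loader above (the directory path here is just an
# illustrative assumption): the folder should contain the raw EDA.csv, ACC.csv
# and TEMP.csv files exported for one Empatica E4 session.
def _example_load_e4(directory="./E4_session"):
    e4_data = loadData_E4(directory)
    # every stream has been interpolated to 8 Hz, so one minute of data is 480 rows
    return e4_data[["EDA", "filtered_eda", "Temp"]].head()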
def loadData_shimmer(filepath):
data = pd.read_csv(filepath, sep='\t', skiprows=(0,1))
orig_cols = data.columns
rename_cols = {}
for search, new_col in [['Timestamp','Timestamp'],
['Accel_LN_X', 'AccelX'], ['Accel_LN_Y', 'AccelY'], ['Accel_LN_Z', 'AccelZ'],
['Skin_Conductance', 'EDA']]:
orig = [c for c in orig_cols if search in c]
if len(orig) == 0:
continue
rename_cols[orig[0]] = new_col
data.rename(columns=rename_cols, inplace=True)
# TODO: Assuming no temperature is recorded
data['Temp'] = 0
# Drop the units row and unnecessary columns
data = data[data['Timestamp'] != 'ms']
data.index = pd.to_datetime(data['Timestamp'], unit='ms')
from imutils import face_utils
import dlib
import cv2
import numpy
import sys
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# default solver is incredibly slow which is why it was changed to 'lbfgs'.
logisticRegr = LogisticRegression(solver = 'lbfgs')
p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)
#construct your numpy array of data
#image_file = sys.argv[1]
#image = cv2.imread(image_file)
# the function to extract the 2346 features from a photo:
# the pattern I'll be using to store the data:
#data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
#df = pd.DataFrame(data)
#print df
def take_picture(s):
cam = cv2.VideoCapture(0)
while True:
ret, frame = cam.read()
cv2.imshow("test", frame)
if not ret:
break
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
cam.release()
break
elif k%256 == 32:
# SPACE pressed
img_name = ""+s+".png"
cv2.imwrite(img_name, frame)
print("{} written!".format(img_name))
### to load image from a file :
#take_picture("new0")
#image = cv2.imread('new0.png')
def load_image(folder_name):
path = folder_name + '/'+str(1)+'.jpg'
#path = 'zakaria/14.jpg'
image = cv2.imread(path)
cv2.imshow("Output", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def extract_test(shap):
#img = cv2.imread(path)
#shap = detect_face(img)
vect = {}
d = 1
for i in range (0,68):
for j in range (i,68):
a = numpy.array((shap[i][0] ,shap[i][1]))
b = numpy.array((shap[j][0] ,shap[j][1]))
#names = names + ["dist"+str(d)]
#distances=distances+[numpy.linalg.norm(a-b)]
col = "dist"+str(d)
val = numpy.linalg.norm(a-b)
vect[col] = val
d = d +1
#return vect
ve = pd.DataFrame([vect])
return ve
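# extract_test() above expects the 68 (x, y) landmark coordinates of a face.
# A minimal sketch of how they could be obtained with the detector/predictor
# initialised at the top of this script; the original detect_face helper
# mentioned in the comments is not shown in this excerpt, so this version is
# only an illustrative assumption.
def _detect_face_sketch(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)
    if len(rects) == 0:
        return None
    # predict the 68 landmarks for the first detected face and convert them
    # to a (68, 2) NumPy array of (x, y) coordinates
    shape = predictor(gray, rects[0])
    return face_utils.shape_to_np(shape)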
import pandas
import numpy as np
from statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
# %% [markdown]
# ##
import warnings
def noop(*args, **kargs):
pass
warnings.warn = noop
import os
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
from joblib import Parallel, delayed
from scipy.ndimage import gaussian_filter1d
from scipy.optimize import curve_fit
from sklearn.model_selection import ParameterGrid
from tqdm import tqdm
from graspy.match import GraphMatch
from graspy.plot import heatmap
from src.utils import get_paired_inds
from src.data import load_metagraph
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.visualization import CLASS_COLOR_DICT, adjplot
print(scipy.__version__)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
"axes.edgecolor": "lightgrey",
"ytick.color": "grey",
"xtick.color": "grey",
"axes.labelcolor": "dimgrey",
"text.color": "dimgrey",
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
# %% [markdown]
# ##
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, **kws)
def diag_indices(length, k=0):
neg = False
if k < 0:
neg = True
k = np.abs(k)
inds = (np.arange(length - k), np.arange(k, length))
if neg:
return (inds[1], inds[0])
else:
return inds
def exp_func(k, alpha, beta=1, c=0):
return beta * np.exp(-alpha * (k - 1)) + c
def calc_mean_by_k(ks, perm_adj):
length = len(perm_adj)
ps = []
for k in ks:
p = perm_adj[diag_indices(length, k)].mean()
ps.append(p)
return np.array(ps)
def get_vals_by_k(ks, perm_adj):
ys = []
xs = []
for k in ks:
y = perm_adj[diag_indices(len(perm_adj), k)]
ys.append(y)
x = np.full(len(y), k)
xs.append(x)
return np.concatenate(ys), np.concatenate(xs)
def make_flat_match(length, **kws):
match_mat = np.zeros((length, length))
match_mat[np.triu_indices(length, k=1)] = 1
return match_mat
def make_linear_match(length, offset=0, **kws):
match_mat = np.zeros((length, length))
for k in np.arange(1, length):
match_mat[diag_indices(length, k)] = length - k + offset
return match_mat
def normalize_match(graph, match_mat, method="fro"):
if method == "fro":
match_mat = match_mat / np.linalg.norm(match_mat) * np.linalg.norm(graph)
elif method == "sum":
match_mat = match_mat / np.sum(match_mat) * np.sum(graph)
elif method is None or method is False:
pass
else:
raise ValueError("invalid method")
return match_mat
def make_exp_match(adj, alpha=0.5, beta=1, c=0, norm=False, **kws):
length = len(adj)
match_mat = np.zeros((length, length))
for k in np.arange(1, length):
match_mat[diag_indices(length, k)] = exp_func(k, alpha, beta, c)
match_mat = normalize_match(adj, match_mat, method=norm)
return match_mat
def fit_gm_exp(
adj,
alpha,
beta=1,
c=0,
n_init=5,
norm=False,
max_iter=50,
eps=0.05,
n_jobs=1,
verbose=0,
):
warnings.filterwarnings("ignore")
gm = GraphMatch(
n_init=1, init_method="rand", max_iter=max_iter, eps=eps, shuffle_input=True
)
match_mat = make_exp_match(adj, alpha=alpha, beta=beta, c=c, norm=norm)
seeds = np.random.choice(int(1e8), size=n_init)
def _fit(seed):
np.random.seed(seed)
gm.fit(match_mat, adj)
return gm.perm_inds_, gm.score_
outs = Parallel(n_jobs=n_jobs, verbose=verbose)(delayed(_fit)(s) for s in seeds)
outs = list(zip(*outs))
perms = np.array(outs[0])
scores = np.array(outs[1])
return perms, scores
def get_best_run(perms, scores, n_opts=None):
if n_opts is None:
n_opts = len(perms)
opt_inds = np.random.choice(len(perms), n_opts, replace=False)
perms = perms[opt_inds]
scores = scores[opt_inds]
max_ind = np.argmax(scores)
return perms[max_ind], scores[max_ind]
# %% [markdown]
# ##
np.random.seed(8888)
graph_type = "G"
master_mg = load_metagraph(graph_type)
mg = master_mg.remove_pdiff()
meta = mg.meta
degrees = mg.calculate_degrees()
quant_val = np.quantile(degrees["Total edgesum"], 0.05)
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > quant_val].index
print(quant_val)
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
idx = mg.meta[mg.meta["pair"].isin(mg.meta.index)].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["pair_td"] = meta["pair_id"].map(meta.groupby("pair_id")["Total degree"].mean())
mg = mg.sort_values(["pair_td", "pair_id"], ascending=False)
meta["inds"] = range(len(meta))
adj = mg.adj.copy()
lp_inds, rp_inds = get_paired_inds(meta)
left_inds = meta[meta["left"]]["inds"]
n_pairs = len(lp_inds)
adj = mg.adj
left_adj = adj[np.ix_(lp_inds, lp_inds)]
left_meta = mg.meta.iloc[lp_inds].copy()
right_adj = adj[np.ix_(rp_inds, rp_inds)]
right_meta = mg.meta.iloc[rp_inds].copy()
# %% [markdown]
# ##
np.random.seed(8888)
n_subsample = n_pairs
subsample_inds = np.random.choice(n_pairs, n_subsample, replace=False)
left_adj = left_adj[np.ix_(subsample_inds, subsample_inds)]
left_meta = left_meta.iloc[subsample_inds]
right_adj = right_adj[np.ix_(subsample_inds, subsample_inds)]
right_meta = right_meta.iloc[subsample_inds]
# %% [markdown]
# ##
pal = sns.color_palette("deep", n_colors=8)
left_color = pal[0]
right_color = pal[1]
match_color = pal[2]
def double_adj_plot(left_perm, right_perm, axs=None, titles=True):
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
left_perm_adj = left_adj[np.ix_(left_perm, left_perm)]
left_perm_meta = left_meta.iloc[left_perm]
ax = axs[0]
_, _, top, _ = adjplot(
left_perm_adj,
meta=left_perm_meta,
plot_type="scattermap",
sizes=(1, 10),
ax=ax,
colors="merge_class",
palette=CLASS_COLOR_DICT,
color=left_color,
)
if titles:
top.set_title(r"Left $\to$ left")
right_perm_adj = right_adj[np.ix_(right_perm, right_perm)]
right_perm_meta = right_meta.iloc[right_perm]
ax = axs[1]
_, _, top, _ = adjplot(
right_perm_adj,
meta=right_perm_meta,
plot_type="scattermap",
sizes=(1, 10),
ax=ax,
colors="merge_class",
palette=CLASS_COLOR_DICT,
color=right_color,
)
if titles:
top.set_title(r"Right $\to$ right")
return axs
def rank_corr_plot(left_sort, right_sort, ax=None, show_corr=True):
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sns.scatterplot(
x=left_sort, y=right_sort, ax=ax, s=15, linewidth=0, alpha=0.8, color=pal[4]
)
if show_corr:
corr = np.corrcoef(left_sort, right_sort)[0, 1]
ax.text(
0.75, 0.05, f"Corr. = {corr:.2f}", transform=ax.transAxes, color="black"
)
ax.set_xlabel("Left rank", color=left_color)
ax.set_ylabel("Right rank", color=right_color)
ax.set_xticks([])
ax.set_yticks([])
return corr
def plot_diag_vals(adj, ax, color="steelblue", kde=True, **kws):
ks = np.arange(-len(adj) + 1, len(adj))
vals = calc_mean_by_k(ks, adj)
sns.scatterplot(
x=ks, y=vals, s=10, alpha=0.4, linewidth=0, ax=ax, color=color, **kws
)
if kde:
kde_vals = gaussian_filter1d(vals, sigma=25)
sns.lineplot(x=ks, y=kde_vals, ax=ax, color=color)
ax.set_xlabel("Diagonal index")
line_kws = dict(linewidth=1, linestyle="--", color="grey")
ax.axvline(0, **line_kws)
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
# %% [markdown]
# ##
n_init = 12 * 4
n_jobs = -2
currtime = time.time()
n_verts = len(left_adj)
halfs = [0.5, 1, 5, 10, 50, 100]
# halfs = [5, 10]
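# each entry of `halfs` acts as a half-life: with alpha = ln(2) / (h * n_verts),
# the exponential match weight exp(-alpha * (k - 1)) falls to half its value
# once the diagonal offset k - 1 reaches h * n_verts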
alphas = [np.round(np.log(2) / (h * n_verts), decimals=7) for h in halfs]
print(alphas)
param_grid = {
"alpha": alphas,
"beta": [1, 0.9, 0.7, 0.5, 0.3, 0.1],
"norm": [False, "fro", "sum"],
"c": [0],
}
params = list(ParameterGrid(param_grid))
basename = f"-n_subsample={n_subsample}"
def get_basename(n_subsample=None, alpha=None, beta=None, c=None, norm=None):
return f"-n_subsample={n_subsample}-alpha={alpha}-beta={beta}-c={c}-norm={norm}"
def set_legend_alpha(leg, alpha=1):
for l in leg.legendHandles:
l.set_alpha(alpha)
# %% [markdown]
# ##
rows = []
perm_df = []
for p in tqdm(params):
row = p.copy()
left_row = p.copy()
left_row["train_side"] = "left"
right_row = p.copy()
right_row["train_side"] = "right"
basename = get_basename(n_subsample=n_subsample, **p)
left_perms, left_scores = fit_gm_exp(
left_adj, n_init=n_init, n_jobs=n_jobs, verbose=0, **p
)
right_perms, right_scores = fit_gm_exp(
right_adj, n_init=n_init, n_jobs=n_jobs, verbose=0, **p
)
gm_left_perm, gm_left_score = get_best_run(left_perms, left_scores)
gm_right_perm, gm_right_score = get_best_run(right_perms, right_scores)
left_perm_series = pd.Series(data=gm_left_perm, name=str(left_row))
right_perm_series = pd.Series(data=gm_right_perm, name=str(right_row))
perm_df.append(left_perm_series)
perm_df.append(right_perm_series)
fig, axs = plt.subplots(2, 2, figsize=(20, 20))
double_adj_plot(gm_left_perm, gm_right_perm, axs=axs[0, :])
gm_left_sort = np.argsort(gm_left_perm)
gm_right_sort = np.argsort(gm_right_perm)
ax = axs[1, 0]
corr = rank_corr_plot(gm_left_sort, gm_right_sort, ax=ax)
row["corr"] = corr
ax = axs[1, 1]
left_perm_adj = left_adj[np.ix_(gm_left_perm, gm_left_perm)]
plot_diag_vals(left_perm_adj, ax, color=left_color, label="Left")
right_perm_adj = right_adj[np.ix_(gm_right_perm, gm_right_perm)]
plot_diag_vals(right_perm_adj, ax, color=right_color, label="Right")
match = make_exp_match(left_perm_adj, **p)
plot_diag_vals(match, ax, color=match_color, kde=False, label="Match")
leg = ax.legend(bbox_to_anchor=(0, 1), loc="upper left", markerscale=3)
set_legend_alpha(leg)
fig.suptitle(p, y=0.95)
stashfig(f"match-profile-{p}" + basename)
row["score"] = gm_left_score
row["norm_score"] = gm_left_score / np.linalg.norm(match)
row["match_fro"] = np.linalg.norm(match)
rows.append(row)
time_mins = (time.time() - currtime) / 60
print(f"{time_mins:.2f} minutes elapsed")
res_df = pd.DataFrame(rows)
from flask import Flask
from flask import request
from flask import jsonify
import pandas as pd
import numpy as np
import scipy.spatial
app = Flask(__name__)
@app.route('/flask', methods = ['POST'])
def index():
content = request.get_json()
#print(content)
user = content['user']
orgDF = pd.json_normalize(content, record_path='orgs')
import requests
from bs4 import BeautifulSoup as soup
import pandas as pd
import gspread
from gspread_dataframe import set_with_dataframe
print("Modules imported without an error.")
# sending request to the url
data = requests.get(
"https://en.wikipedia.org/wiki/Template:COVID-19_pandemic_data/India_medical_cases_by_state_and_union_territory")
# parsing the html with beautiful soup
soup = soup(data.text, 'html.parser')
# extracting the relevant data from the html
table = soup.find('div', id='covid19-container')
rows = table.find_all('tr')
# extracting the heading of the states column and correcting the typos within the list comprehension.
columnstates = [v.replace('\n', '') for v in rows[1].find_all('th')[0]]
# extracting the heading of the columns that hold numbers and correcting the typos within the list comprehension.
columns = [v.text.replace('\n', '') for v in rows[1].find_all('th')[1:]]
# making two separate dataframes using pandas with the respective columns
df1 = pd.DataFrame(columns=columnstates)
#%% [markdown]
# # MASE and alignment
# Investigating the use of MASE as a method for joint embedding, and the effects of
# different alignment techniques
#%% [markdown]
# ## Preliminaries
#%%
import datetime
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, simple_scatterplot
from giskard.utils import get_paired_inds
from graspologic.align import OrthogonalProcrustes, SeedlessProcrustes
from graspologic.embed import (
AdjacencySpectralEmbed,
MultipleASE,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspologic.match import GraphMatch
from graspologic.plot import pairplot
from graspologic.utils import (
augment_diagonal,
binarize,
multigraph_lcc_intersection,
pass_to_ranks,
)
from pkg.data import load_maggot_graph
from pkg.io import savefig
from pkg.plot import set_theme
from pkg.utils import set_warnings
from scipy.stats import ortho_group
from sklearn.neighbors import NearestNeighbors
from src.visualization import CLASS_COLOR_DICT as palette
from src.visualization import add_connections, adjplot
t0 = time.time()
set_theme()
def stashfig(name, **kwargs):
foldername = "mase_alignment"
savefig(name, foldername=foldername, **kwargs)
#%%
def plot_pairs(
X,
labels,
n_show=6,
model=None,
left_pair_inds=None,
right_pair_inds=None,
equal=False,
palette=None,
):
"""Plots pairwise dimensional projections, and draws lines between known pair neurons
Parameters
----------
X : [type]
[description]
labels : [type]
[description]
model : [type], optional
[description], by default None
left_pair_inds : [type], optional
[description], by default None
right_pair_inds : [type], optional
[description], by default None
equal : bool, optional
[description], by default False
Returns
-------
[type]
[description]
"""
if n_show is not None:
n_dims = n_show
else:
n_dims = X.shape[1]
fig, axs = plt.subplots(
n_dims - 1, n_dims - 1, sharex=False, sharey=False, figsize=(20, 20)
)
data = pd.DataFrame(data=X[:, :n_dims], columns=[str(i) for i in range(n_dims)])
data["label"] = labels
for i in range(n_dims - 1):
for j in range(n_dims):
ax = axs[i, j - 1]
ax.axis("off")
if i < j:
sns.scatterplot(
data=data,
x=str(j),
y=str(i),
ax=ax,
alpha=0.7,
linewidth=0,
s=8,
legend=False,
hue="label",
palette=palette,
)
if left_pair_inds is not None and right_pair_inds is not None:
add_connections(
data.iloc[left_pair_inds, j],
data.iloc[right_pair_inds, j],
data.iloc[left_pair_inds, i],
data.iloc[right_pair_inds, i],
ax=ax,
)
plt.tight_layout()
return fig, axs
def joint_procrustes(data1, data2, method="orthogonal"):
n = len(data1[0])
if method == "orthogonal":
procruster = OrthogonalProcrustes()
elif method == "seedless":
procruster = SeedlessProcrustes(init="sign_flips")
elif method == "seedless-oracle":
data1 = joint_procrustes(data1, data2, method="orthogonal")
procruster = SeedlessProcrustes(
init="custom", initial_Q=np.eye(data1[0].shape[1])
)
data1 = np.concatenate(data1, axis=0)
data2 = np.concatenate(data2, axis=0)
data1_mapped = procruster.fit_transform(data1, data2)
data1 = (data1_mapped[:n], data1_mapped[n:])
return data1
def prescale_for_embed(adjs):
"""Want to avoid any excess alignment issues simply from the input matrices having
different Frobenius norms"""
norms = [np.linalg.norm(adj, ord="fro") for adj in adjs]
mean_norm = np.mean(norms)
adjs = [adjs[i] * mean_norm / norms[i] for i in range(len(adjs))]
return adjs
def double_heatmap(
matrices,
axs=None,
cbar_ax=None,
figsize=(10, 5),
square=True,
vmin=None,
vmax=None,
center=0,
cmap="RdBu_r",
xticklabels=False,
yticklabels=False,
**kwargs,
):
if axs is None and cbar_ax is None:
fig, axs = plt.subplots(
2, 2, figsize=figsize, gridspec_kw=dict(height_ratios=[0.9, 0.05])
)
cbar_ax = merge_axes(fig, axs, rows=1)
if isinstance(matrices, (list, tuple)):
matrices = np.stack(matrices, axis=0)
if vmax is None:
vmax = np.max(matrices)
if vmin is None:
vmin = np.min(matrices)
heatmap_kws = dict(
square=square,
vmin=vmin,
vmax=vmax,
center=center,
cmap=cmap,
xticklabels=xticklabels,
yticklabels=yticklabels,
)
ax = axs[0, 0]
sns.heatmap(matrices[0], ax=ax, cbar=False, **heatmap_kws)
ax = axs[0, 1]
sns.heatmap(
matrices[1],
ax=ax,
cbar_ax=cbar_ax,
cbar_kws={"orientation": "horizontal", "shrink": 0.6},
**heatmap_kws,
)
return fig, axs
def compute_nn_ranks(left_X, right_X, max_n_neighbors=None, metric="cosine"):
if max_n_neighbors is None:
max_n_neighbors = len(left_X)
nn_kwargs = dict(n_neighbors=max_n_neighbors, metric=metric)
nn_left = NearestNeighbors(**nn_kwargs)
nn_right = NearestNeighbors(**nn_kwargs)
nn_left.fit(left_X)
nn_right.fit(right_X)
left_neighbors = nn_right.kneighbors(left_X, return_distance=False)
right_neighbors = nn_left.kneighbors(right_X, return_distance=False)
arange = np.arange(len(left_X))
_, left_match_rank = np.where(left_neighbors == arange[:, None])
_, right_match_rank = np.where(right_neighbors == arange[:, None])
left_match_rank += 1
right_match_rank += 1
rank_data = np.concatenate((left_match_rank, right_match_rank))
rank_data = pd.Series(rank_data, name="pair_nn_rank")
rank_data = rank_data.to_frame()
rank_data["metric"] = metric
rank_data["side"] = len(left_X) * ["Left"] + len(right_X) * ["Right"]
rank_data["n_components"] = left_X.shape[1]
rank_data["reciprocal_rank"] = 1 / rank_data["pair_nn_rank"]
return rank_data
#%% [markdown]
# ## A simple simulation to validate the joint Procrustes method
#%%
np.random.seed(88888)
n = 32
X1 = np.random.uniform(0.1, 0.9, (n, 2))
Y1 = np.random.multivariate_normal([1, 1], np.eye(2), n)
Q = ortho_group.rvs(2)
X2 = X1 @ Q
Y2 = Y1 @ Q
pred_X2, pred_Y2 = joint_procrustes((X2, Y2), (X1, Y1))
colors = sns.color_palette("deep", 10, desat=1)
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
ax = axs[0]
simple_scatterplot(X1, color=colors[0], alpha=1, ax=ax)
simple_scatterplot(X2, color=colors[1], alpha=1, ax=ax)
simple_scatterplot(
pred_X2, color=colors[3], marker="s", s=50, alpha=0.7, zorder=-1, ax=ax
)
ax.set(title=r"$X$")
ax = axs[1]
simple_scatterplot(Y1, color=colors[0], alpha=1, ax=ax)
simple_scatterplot(Y2, color=colors[1], alpha=1, ax=ax)
simple_scatterplot(
pred_Y2, color=colors[3], marker="s", s=50, alpha=0.7, zorder=-1, ax=ax
)
ax.set(title=r"$Y$")
stashfig("joint-procrustes-demo")
# %% [markdown]
# ## Load and process data
#%%
mg = load_maggot_graph()
mg = mg[mg.nodes["paper_clustered_neurons"]]
ll_mg, rr_mg, lr_mg, rl_mg = mg.bisect(paired=True)
ll_adj = ll_mg.sum.adj.copy()
rr_adj = rr_mg.sum.adj.copy()
nodes = ll_mg.nodes
nodes["_inds"] = range(len(nodes))
sorted_nodes = nodes.sort_values(["simple_group", "merge_class"])
sort_inds = sorted_nodes["_inds"]
ll_adj = ll_adj[np.ix_(sort_inds, sort_inds)]
rr_adj = rr_adj[np.ix_(sort_inds, sort_inds)]
adjs, lcc_inds = multigraph_lcc_intersection([ll_adj, rr_adj], return_inds=True)
ll_adj = adjs[0]
rr_adj = adjs[1]
sorted_nodes = sorted_nodes.iloc[lcc_inds]
print(f"{len(lcc_inds)} in intersection of largest connected components.")
#%% [markdown]
# ## Embed using MASE
#%%
ll_adj = binarize(ll_adj)
rr_adj = binarize(rr_adj)
adjs = prescale_for_embed([ll_adj, rr_adj])
#%%
colors = sns.color_palette("Set1")
side_palette = dict(zip(["Left", "Right"], colors))
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
adjplot(
ll_adj,
plot_type="scattermap",
sizes=(1, 2),
ax=axs[0],
title=r"Left $\to$ left",
color=side_palette["Left"],
)
adjplot(
rr_adj,
plot_type="scattermap",
sizes=(1, 2),
ax=axs[1],
title=r"Right $\to$ right",
color=side_palette["Right"],
)
stashfig("left-right-induced-adjs")
#%%
n_components = 32
mase = MultipleASE(n_components=n_components, algorithm="full")
mase.fit(adjs)
#%% [markdown]
# ### Look at the $\hat{R}$ matrices that MASE estimates
#%%
fig, axs = double_heatmap(mase.scores_)
axs[0, 0].set(title=r"$\hat{R}_{LL}$")
axs[0, 1].set(title=r"$\hat{R}_{RR}$")
stashfig("R-matrices")
#%% [markdown]
# ### Look at the $\hat{P}$ matrices that MASE estimates
U = mase.latent_left_
V = mase.latent_right_
R_ll = mase.scores_[0]
R_rr = mase.scores_[1]
true_phat_ll = U @ R_ll @ V.T
true_phat_rr = U @ R_rr @ V.T
fig, axs = double_heatmap((true_phat_ll, true_phat_rr), figsize=(15, 7.5))
#%% [markdown]
# ## Construct per-node, per-graph representations from MASE with alignment
#%% [markdown]
# ### Which alignment to do?
# - We can either align in the condensed space of the $R$ matrices (after we decompose
# using an SVD) or align in the projected node-wise latent space like we normally would.
# - We can either align the out and in representations jointly (to solve for the same
# $Q$ to be applied to both out and in) or separately.
#%%
scaled = True
for align_space in ["score"]:
for align_mode in ["joint"]:
Z_ll, S_ll, W_ll_T = selectSVD(R_ll, n_components=len(R_ll), algorithm="full")
Z_rr, S_rr, W_rr_T = selectSVD(R_rr, n_components=len(R_rr), algorithm="full")
W_ll = W_ll_T.T
W_rr = W_rr_T.T
S_ll_sqrt = np.diag(np.sqrt(S_ll))
S_rr_sqrt = np.diag(np.sqrt(S_rr))
if scaled:
Z_ll = Z_ll @ S_ll_sqrt
W_ll = W_ll @ S_ll_sqrt
Z_rr = Z_rr @ S_rr_sqrt
W_rr = W_rr @ S_rr_sqrt
if align_space == "score":
if align_mode == "joint":
Z_ll, W_ll = joint_procrustes((Z_ll, W_ll), (Z_rr, W_rr))
else:
op_out = OrthogonalProcrustes()
Z_ll = op_out.fit_transform(Z_ll, Z_rr)
op_in = OrthogonalProcrustes()
W_ll = op_in.fit_transform(W_ll, W_rr)
X_ll = U @ Z_ll
Y_ll = V @ W_ll
X_rr = U @ Z_rr
Y_rr = V @ W_ll
if align_space == "latent":
if align_mode == "joint":
X_ll, Y_ll = joint_procrustes((X_ll, Y_ll), (X_rr, Y_rr))
else:
op_out = OrthogonalProcrustes()
X_ll = op_out.fit_transform(X_ll, X_rr)
op_in = OrthogonalProcrustes()
Y_ll = op_in.fit_transform(Y_ll, Y_rr)
norm = np.sqrt(
np.linalg.norm(X_ll - X_rr) ** 2 + np.linalg.norm(Y_ll - Y_rr) ** 2
)
data = np.concatenate((X_ll, X_rr), axis=0)
left_inds = np.arange(len(X_ll))
right_inds = np.arange(len(X_rr)) + len(X_ll)
labels = sorted_nodes["merge_class"].values
labels = np.concatenate((labels, labels), axis=0)
fig, axs = plot_pairs(
data,
labels,
left_pair_inds=left_inds,
right_pair_inds=right_inds,
palette=palette,
)
fig.suptitle(
f"Align mode = {align_mode}, align space = {align_space}, norm of difference = {norm:0.4f}",
y=1.03,
fontsize="xx-large",
)
phat_ll = X_ll @ Y_ll.T
phat_rr = X_rr @ Y_rr.T
fig, axs = double_heatmap((phat_ll, phat_rr), figsize=(15, 7.6))
#%%
left_composite_adj = np.concatenate((ll_adj, ll_adj.T), axis=1)
right_composite_adj = np.concatenate((rr_adj, rr_adj.T), axis=1)
rank_data = compute_nn_ranks(left_composite_adj, right_composite_adj, metric="jaccard")
adj_mrr = rank_data["reciprocal_rank"].mean()
#%%
X_ll_mase = X_ll
X_rr_mase = X_rr
Y_ll_mase = Y_ll
Y_rr_mase = Y_rr
X_composite_mase = np.block([[X_ll, Y_ll], [X_rr, Y_rr]])
U_mase, _, _ = selectSVD(X_composite_mase, n_components=64, algorithm="full")
n_pairs = len(X_ll)
frames = []
n_components_range = [2, 4, 6, 8, 12, 16, 24, 32]
for n_components in n_components_range:
left_X = U_mase[:n_pairs, :n_components]
right_X = U_mase[n_pairs:, :n_components]
rank_data = compute_nn_ranks(left_X, right_X, metric="cosine")
frames.append(rank_data)
mase_results = pd.concat(frames, ignore_index=True)
mase_results["method"] = "mase"
#%%
ase = AdjacencySpectralEmbed(n_components=16)
X_ll_ase, Y_ll_ase = ase.fit_transform(ll_adj)
X_rr_ase, Y_rr_ase = ase.fit_transform(rr_adj)
X_ll_ase, Y_ll_ase = joint_procrustes(
(X_ll_ase, Y_ll_ase), (X_rr_ase, Y_rr_ase), method="seedless-oracle"
)
X_composite_ase = np.block([[X_ll_ase, Y_ll_ase], [X_rr_ase, Y_rr_ase]])
U_ase, _, _ = selectSVD(X_composite_ase, n_components=32, algorithm="full")
#%%
frames = []
n_components_range = np.arange(1, 32)
for n_components in n_components_range:
left_X = U_ase[:n_pairs, :n_components]
right_X = U_ase[n_pairs:, :n_components]
rank_data = compute_nn_ranks(left_X, right_X, metric="cosine")
frames.append(rank_data)
ase_results = pd.concat(frames, ignore_index=True)
ase_results["method"] = "ase"
#%%
results = pd.concat((mase_results, ase_results), ignore_index=True)